From b3e57ca3e5da662da0d2aa20318f48b62d752359 Mon Sep 17 00:00:00 2001 From: Pepe Fagoaga Date: Wed, 23 Nov 2022 15:53:53 +0100 Subject: [PATCH] feat(compliance): Loader and Execute (#1465) --- .pre-commit-config.yaml | 30 +- check_sample.metadata.json | 14 +- compliance/{ => aws}/cis_1.4_aws.json | 0 compliance/{ => aws}/cis_1.5_aws.json | 0 compliance/aws/ens_rd2022_aws.json | 1636 +++++++++++++++ compliance/ens_rd2022_aws.json | 1793 ----------------- config/config.py | 3 + lib/check/check.py | 135 +- lib/check/check_test.py | 86 +- lib/check/checks_loader.py | 26 +- lib/check/compliance.py | 43 + lib/check/compliance_models.py | 75 + lib/check/fixtures/aws/cis_v1.4_aws.json | 82 + lib/check/fixtures/aws/ens_v3_aws.json | 82 + lib/check/models.py | 190 +- lib/outputs/models.py | 31 +- lib/outputs/outputs.py | 435 ++-- lib/outputs/outputs_test.py | 26 +- lib/utils/utils.py | 19 +- ...zer_enabled_without_findings.metadata.json | 14 +- ...accessanalyzer_enabled_without_findings.py | 2 +- ...tain_current_contact_details.metadata.json | 7 +- ...ccount_maintain_current_contact_details.py | 2 +- ...ct_information_is_registered.metadata.json | 7 +- ...urity_contact_information_is_registered.py | 2 +- ...egistered_in_the_aws_account.metadata.json | 7 +- ...tions_are_registered_in_the_aws_account.py | 2 +- ...ertificates_expiration_check.metadata.json | 7 +- .../acm_certificates_expiration_check.py | 2 +- ...es_transparency_logs_enabled.metadata.json | 7 +- ..._certificates_transparency_logs_enabled.py | 2 +- ...igateway_authorizers_enabled.metadata.json | 7 +- .../apigateway_authorizers_enabled.py | 2 +- ...y_client_certificate_enabled.metadata.json | 7 +- .../apigateway_client_certificate_enabled.py | 2 +- .../apigateway_endpoint_public.metadata.json | 7 +- .../apigateway_endpoint_public.py | 2 +- .../apigateway_logging_enabled.metadata.json | 7 +- .../apigateway_logging_enabled.py | 2 +- .../apigateway_waf_acl_attached.metadata.json | 7 +- .../apigateway_waf_acl_attached.py | 2 +- ...wayv2_access_logging_enabled.metadata.json | 9 +- .../apigatewayv2_access_logging_enabled.py | 2 +- ...atewayv2_authorizers_enabled.metadata.json | 7 +- .../apigatewayv2_authorizers_enabled.py | 2 +- ...ult_internet_access_disabled.metadata.json | 70 +- ..._fleet_default_internet_access_disabled.py | 2 +- ...eet_maximum_session_duration.metadata.json | 68 +- ...ppstream_fleet_maximum_session_duration.py | 2 +- ...t_session_disconnect_timeout.metadata.json | 70 +- ...stream_fleet_session_disconnect_timeout.py | 2 +- ...sion_idle_disconnect_timeout.metadata.json | 70 +- ...m_fleet_session_idle_disconnect_timeout.py | 2 +- ...ets_ec2_launch_configuration.metadata.json | 8 +- ...g_find_secrets_ec2_launch_configuration.py | 2 +- ...s_cloudtrail_logging_enabled.metadata.json | 5 +- ...i_operations_cloudtrail_logging_enabled.py | 2 +- ..._function_no_secrets_in_code.metadata.json | 5 +- .../awslambda_function_no_secrets_in_code.py | 2 +- ...tion_no_secrets_in_variables.metadata.json | 5 +- ...lambda_function_no_secrets_in_variables.py | 2 +- ...tion_not_publicly_accessible.metadata.json | 5 +- ...lambda_function_not_publicly_accessible.py | 2 +- ...bda_function_url_cors_policy.metadata.json | 5 +- .../awslambda_function_url_cors_policy.py | 2 +- ...wslambda_function_url_public.metadata.json | 5 +- .../awslambda_function_url_public.py | 2 +- ...ion_using_supported_runtimes.metadata.json | 5 +- ...ambda_function_using_supported_runtimes.py | 2 +- ...rmation_outputs_find_secrets.metadata.json | 3 +- 
.../cloudformation_outputs_find_secrets.py | 2 +- ...rmination_protection_enabled.metadata.json | 3 +- ...n_stacks_termination_protection_enabled.py | 2 +- ...eld_level_encryption_enabled.metadata.json | 7 +- ...ibutions_field_level_encryption_enabled.py | 2 +- ...ons_geo_restrictions_enabled.metadata.json | 7 +- ..._distributions_geo_restrictions_enabled.py | 2 +- ..._distributions_https_enabled.metadata.json | 7 +- .../cloudfront_distributions_https_enabled.py | 2 +- ...istributions_logging_enabled.metadata.json | 7 +- ...loudfront_distributions_logging_enabled.py | 2 +- ...ing_deprecated_ssl_protocols.metadata.json | 7 +- ...ibutions_using_deprecated_ssl_protocols.py | 2 +- ...ront_distributions_using_waf.metadata.json | 7 +- .../cloudfront_distributions_using_waf.py | 2 +- ...l_cloudwatch_logging_enabled.metadata.json | 67 +- .../cloudtrail_cloudwatch_logging_enabled.py | 2 +- ...trail_kms_encryption_enabled.metadata.json | 69 +- .../cloudtrail_kms_encryption_enabled.py | 2 +- ..._log_file_validation_enabled.metadata.json | 69 +- .../cloudtrail_log_file_validation_enabled.py | 2 +- ...ucket_access_logging_enabled.metadata.json | 69 +- ...l_logs_s3_bucket_access_logging_enabled.py | 2 +- ...t_is_not_publicly_accessible.metadata.json | 69 +- ...gs_s3_bucket_is_not_publicly_accessible.py | 2 +- ...udtrail_multi_region_enabled.metadata.json | 80 +- .../cloudtrail_multi_region_enabled.py | 2 +- ...l_s3_dataevents_read_enabled.metadata.json | 3 +- .../cloudtrail_s3_dataevents_read_enabled.py | 2 +- ..._s3_dataevents_write_enabled.metadata.json | 3 +- .../cloudtrail_s3_dataevents_write_enabled.py | 2 +- ...etwork_acls_alarm_configured.metadata.json | 7 +- ...hanges_to_network_acls_alarm_configured.py | 2 +- ...rk_gateways_alarm_configured.metadata.json | 7 +- ...es_to_network_gateways_alarm_configured.py | 2 +- ...oute_tables_alarm_configured.metadata.json | 7 +- ...o_network_route_tables_alarm_configured.py | 2 +- ...ges_to_vpcs_alarm_configured.metadata.json | 7 +- ...dwatch_changes_to_vpcs_alarm_configured.py | 2 +- ...oss_account_sharing_disabled.metadata.json | 7 +- ...oudwatch_cross_account_sharing_disabled.py | 2 +- ...group_kms_encryption_enabled.metadata.json | 7 +- ...dwatch_log_group_kms_encryption_enabled.py | 2 +- ...policy_specific_days_enabled.metadata.json | 7 +- ..._retention_policy_specific_days_enabled.py | 2 +- ...onfiguration_changes_enabled.metadata.json | 7 +- ...ws_config_configuration_changes_enabled.py | 2 +- ...onfiguration_changes_enabled.metadata.json | 7 +- ...loudtrail_configuration_changes_enabled.py | 2 +- ...lter_authentication_failures.metadata.json | 7 +- ...g_metric_filter_authentication_failures.py | 2 +- ...er_aws_organizations_changes.metadata.json | 7 +- ...metric_filter_aws_organizations_changes.py | 2 +- ...cheduled_deletion_of_kms_cmk.metadata.json | 7 +- ...isable_or_scheduled_deletion_of_kms_cmk.py | 2 +- ...for_s3_bucket_policy_changes.metadata.json | 7 +- ...ric_filter_for_s3_bucket_policy_changes.py | 2 +- ...metric_filter_policy_changes.metadata.json | 7 +- ...dwatch_log_metric_filter_policy_changes.py | 2 +- ...log_metric_filter_root_usage.metadata.json | 7 +- ...cloudwatch_log_metric_filter_root_usage.py | 2 +- ...ilter_security_group_changes.metadata.json | 7 +- ...og_metric_filter_security_group_changes.py | 2 +- ...c_filter_sign_in_without_mfa.metadata.json | 7 +- ...h_log_metric_filter_sign_in_without_mfa.py | 2 +- ...ilter_unauthorized_api_calls.metadata.json | 7 +- ...og_metric_filter_unauthorized_api_calls.py | 2 +- 
...l_public_publishing_disabled.metadata.json | 3 +- ...ges_external_public_publishing_disabled.py | 2 +- ...ebuild_project_older_90_days.metadata.json | 68 +- .../codebuild_project_older_90_days.py | 2 +- ...ct_user_controlled_buildspec.metadata.json | 68 +- ...build_project_user_controlled_buildspec.py | 2 +- ...recorder_all_regions_enabled.metadata.json | 7 +- .../config_recorder_all_regions_enabled.py | 4 +- ...ctory_log_forwarding_enabled.metadata.json | 3 +- ...ervice_directory_log_forwarding_enabled.py | 2 +- ...ectory_monitor_notifications.metadata.json | 3 +- ...service_directory_monitor_notifications.py | 2 +- ...ce_directory_snapshots_limit.metadata.json | 3 +- ...ectoryservice_directory_snapshots_limit.py | 2 +- ..._ldap_certificate_expiration.metadata.json | 3 +- ...toryservice_ldap_certificate_expiration.py | 2 +- ...ius_server_security_protocol.metadata.json | 3 +- ...service_radius_server_security_protocol.py | 2 +- ...supported_mfa_radius_enabled.metadata.json | 3 +- ...oryservice_supported_mfa_radius_enabled.py | 2 +- ...r_cluster_encryption_enabled.metadata.json | 7 +- ..._accelerator_cluster_encryption_enabled.py | 2 +- ...s_kms_cmk_encryption_enabled.metadata.json | 7 +- ...amodb_tables_kms_cmk_encryption_enabled.py | 2 +- ...dynamodb_tables_pitr_enabled.metadata.json | 7 +- .../dynamodb_tables_pitr_enabled.py | 2 +- .../ec2_ami_public.metadata.json | 7 +- .../ec2/ec2_ami_public/ec2_ami_public.py | 2 +- .../ec2_ebs_default_encryption.metadata.json | 7 +- .../ec2_ebs_default_encryption.py | 2 +- .../ec2_ebs_public_snapshot.metadata.json | 7 +- .../ec2_ebs_public_snapshot.py | 2 +- .../ec2_ebs_snapshots_encrypted.metadata.json | 7 +- .../ec2_ebs_snapshots_encrypted.py | 2 +- .../ec2_ebs_volume_encryption.metadata.json | 7 +- .../ec2_ebs_volume_encryption.py | 2 +- .../ec2_elastic_ip_shodan.metadata.json | 7 +- .../ec2_elastic_ip_shodan.py | 2 +- .../ec2_elastic_ip_unassgined.metadata.json | 7 +- .../ec2_elastic_ip_unassgined.py | 2 +- .../ec2_instance_imdsv2_enabled.metadata.json | 7 +- .../ec2_instance_imdsv2_enabled.py | 2 +- ...facing_with_instance_profile.metadata.json | 7 +- ...e_internet_facing_with_instance_profile.py | 2 +- .../ec2_instance_managed_by_ssm.metadata.json | 7 +- .../ec2_instance_managed_by_ssm.py | 2 +- ...nce_older_than_specific_days.metadata.json | 7 +- .../ec2_instance_older_than_specific_days.py | 2 +- ...c2_instance_profile_attached.metadata.json | 3 +- .../ec2_instance_profile_attached.py | 2 +- .../ec2_instance_public_ip.metadata.json | 7 +- .../ec2_instance_public_ip.py | 2 +- ...2_instance_secrets_user_data.metadata.json | 8 +- .../ec2_instance_secrets_user_data.py | 2 +- .../__init__.py | 0 ...kacl_allow_ingress_any_port.metadata.json} | 11 +- .../ec2_networkacl_allow_ingress_any_port.py} | 4 +- ...networkacl_allow_ingress_any_port_test.py} | 34 +- ...cl_allow_ingress_tcp_port_22.metadata.json | 18 +- ...c2_networkacl_allow_ingress_tcp_port_22.py | 2 +- ..._allow_ingress_tcp_port_3389.metadata.json | 18 +- ..._networkacl_allow_ingress_tcp_port_3389.py | 2 +- ...ss_from_internet_to_any_port.metadata.json | 7 +- ...allow_ingress_from_internet_to_any_port.py | 2 +- ..._to_port_mongodb_27017_27018.metadata.json | 7 +- ...om_internet_to_port_mongodb_27017_27018.py | 2 +- ...ternet_to_tcp_ftp_port_20_21.metadata.json | 18 +- ...ess_from_internet_to_tcp_ftp_port_20_21.py | 2 +- ...from_internet_to_tcp_port_22.metadata.json | 18 +- ...ow_ingress_from_internet_to_tcp_port_22.py | 2 +- ...om_internet_to_tcp_port_3389.metadata.json | 18 +- 
..._ingress_from_internet_to_tcp_port_3389.py | 2 +- ...ort_cassandra_7199_9160_8888.metadata.json | 7 +- ...et_to_tcp_port_cassandra_7199_9160_8888.py | 2 +- ...search_kibana_9200_9300_5601.metadata.json | 7 +- ...ort_elasticsearch_kibana_9200_9300_5601.py | 2 +- ...ernet_to_tcp_port_kafka_9092.metadata.json | 7 +- ...ss_from_internet_to_tcp_port_kafka_9092.py | 2 +- ..._to_tcp_port_memcached_11211.metadata.json | 7 +- ...om_internet_to_tcp_port_memcached_11211.py | 2 +- ...ernet_to_tcp_port_mysql_3306.metadata.json | 7 +- ...ss_from_internet_to_tcp_port_mysql_3306.py | 2 +- ...to_tcp_port_oracle_1521_2483.metadata.json | 8 +- ...m_internet_to_tcp_port_oracle_1521_2483.py | 2 +- ...et_to_tcp_port_postgres_5432.metadata.json | 7 +- ...from_internet_to_tcp_port_postgres_5432.py | 2 +- ...ernet_to_tcp_port_redis_6379.metadata.json | 7 +- ...ss_from_internet_to_tcp_port_redis_6379.py | 2 +- ...cp_port_sql_server_1433_1434.metadata.json | 7 +- ...ternet_to_tcp_port_sql_server_1433_1434.py | 2 +- ...ternet_to_tcp_port_telnet_23.metadata.json | 7 +- ...ess_from_internet_to_tcp_port_telnet_23.py | 2 +- ..._allow_wide_open_public_ipv4.metadata.json | 9 +- ...curitygroup_allow_wide_open_public_ipv4.py | 2 +- ...oup_default_restrict_traffic.metadata.json | 7 +- ..._securitygroup_default_restrict_traffic.py | 2 +- ...ritygroup_from_launch_wizard.metadata.json | 7 +- .../ec2_securitygroup_from_launch_wizard.py | 2 +- ...se_without_ingress_filtering.metadata.json | 9 +- ...ygroup_in_use_without_ingress_filtering.py | 2 +- .../ec2_securitygroup_not_used.metadata.json | 7 +- .../ec2_securitygroup_not_used.py | 2 +- ...th_many_ingress_egress_rules.metadata.json | 7 +- ...itygroup_with_many_ingress_egress_rules.py | 2 +- ...ies_lifecycle_policy_enabled.metadata.json | 68 +- ...r_repositories_lifecycle_policy_enabled.py | 2 +- ...ries_not_publicly_accessible.metadata.json | 68 +- ...cr_repositories_not_publicly_accessible.py | 2 +- ..._scan_images_on_push_enabled.metadata.json | 68 +- ...epositories_scan_images_on_push_enabled.py | 2 +- ...nerabilities_in_latest_image.metadata.json | 68 +- ...es_scan_vulnerabilities_in_latest_image.py | 2 +- ...tions_no_environment_secrets.metadata.json | 69 +- ...task_definitions_no_environment_secrets.py | 2 +- ...s_encryption_at_rest_enabled.metadata.json | 69 +- .../efs_encryption_at_rest_enabled.py | 2 +- .../efs_have_backup_enabled.metadata.json | 69 +- .../efs_have_backup_enabled.py | 2 +- .../efs_not_publicly_accessible.metadata.json | 68 +- .../efs_not_publicly_accessible.py | 2 +- ...ncryption_in_secrets_enabled.metadata.json | 68 +- ...r_kms_cmk_encryption_in_secrets_enabled.py | 2 +- ...e_endpoint_access_restricted.metadata.json | 67 +- ...ontrol_plane_endpoint_access_restricted.py | 2 +- ...ne_logging_all_types_enabled.metadata.json | 67 +- ...control_plane_logging_all_types_enabled.py | 2 +- ...ints_not_publicly_accessible.metadata.json | 69 +- .../eks_endpoints_not_publicly_accessible.py | 2 +- .../elb_insecure_ssl_ciphers.metadata.json | 67 +- .../elb_insecure_ssl_ciphers.py | 2 +- .../elb_internet_facing.metadata.json | 67 +- .../elb_internet_facing.py | 2 +- .../elb_logging_enabled.metadata.json | 67 +- .../elb_logging_enabled.py | 2 +- .../elb_ssl_listeners.metadata.json | 67 +- .../elb_ssl_listeners/elb_ssl_listeners.py | 2 +- .../elbv2_deletion_protection.metadata.json | 67 +- .../elbv2_deletion_protection.py | 2 +- ...elbv2_desync_mitigation_mode.metadata.json | 67 +- .../elbv2_desync_mitigation_mode.py | 2 +- 
.../elbv2_insecure_ssl_ciphers.metadata.json | 67 +- .../elbv2_insecure_ssl_ciphers.py | 2 +- .../elbv2_internet_facing.metadata.json | 67 +- .../elbv2_internet_facing.py | 2 +- .../elbv2_listeners_underneath.metadata.json | 67 +- .../elbv2_listeners_underneath.py | 2 +- .../elbv2_logging_enabled.metadata.json | 67 +- .../elbv2_logging_enabled.py | 2 +- .../elbv2_request_smugling.metadata.json | 67 +- .../elbv2_request_smugling.py | 2 +- .../elbv2_ssl_listeners.metadata.json | 67 +- .../elbv2_ssl_listeners.py | 2 +- .../elbv2_waf_acl_attached.metadata.json | 67 +- .../elbv2_waf_acl_attached.py | 2 +- ...account_public_block_enabled.metadata.json | 3 +- ...mr_cluster_account_public_block_enabled.py | 2 +- ...er_master_nodes_no_public_ip.metadata.json | 3 +- .../emr_cluster_master_nodes_no_public_ip.py | 2 +- ...r_cluster_publicly_accesible.metadata.json | 3 +- .../emr_cluster_publicly_accesible.py | 2 +- ..._vaults_policy_public_access.metadata.json | 3 +- .../glacier_vaults_policy_public_access.py | 2 +- ...passwords_encryption_enabled.metadata.json | 7 +- ...connection_passwords_encryption_enabled.py | 2 +- ..._metadata_encryption_enabled.metadata.json | 7 +- ...ta_catalogs_metadata_encryption_enabled.py | 2 +- ...base_connections_ssl_enabled.metadata.json | 7 +- .../glue_database_connections_ssl_enabled.py | 2 +- ...atch_logs_encryption_enabled.metadata.json | 7 +- ...ints_cloudwatch_logs_encryption_enabled.py | 2 +- ..._bookmark_encryption_enabled.metadata.json | 7 +- ...dpoints_job_bookmark_encryption_enabled.py | 2 +- ...points_s3_encryption_enabled.metadata.json | 7 +- ...lopment_endpoints_s3_encryption_enabled.py | 2 +- ...amazon_s3_encryption_enabled.metadata.json | 9 +- ...e_etl_jobs_amazon_s3_encryption_enabled.py | 2 +- ...atch_logs_encryption_enabled.metadata.json | 7 +- ...jobs_cloudwatch_logs_encryption_enabled.py | 2 +- ..._bookmark_encryption_enabled.metadata.json | 7 +- ...tl_jobs_job_bookmark_encryption_enabled.py | 2 +- .../guardduty_is_enabled.metadata.json | 65 +- .../guardduty_is_enabled.py | 2 +- ...ty_no_high_severity_findings.metadata.json | 65 +- .../guardduty_no_high_severity_findings.py | 2 +- ...dministrator_access_with_mfa.metadata.json | 7 +- .../iam_administrator_access_with_mfa.py | 2 +- .../iam_avoid_root_usage.metadata.json | 20 +- .../iam_avoid_root_usage.py | 2 +- ...iam_check_saml_providers_sts.metadata.json | 69 +- .../iam_check_saml_providers_sts.py | 2 +- ..._disable_30_days_credentials.metadata.json | 7 +- .../iam_disable_30_days_credentials.py | 2 +- ..._disable_45_days_credentials.metadata.json | 7 +- .../iam_disable_45_days_credentials.py | 2 +- ..._disable_90_days_credentials.metadata.json | 14 +- .../iam_disable_90_days_credentials.py | 2 +- ...y_permissive_role_assumption.metadata.json | 67 +- ...ustom_policy_permissive_role_assumption.py | 2 +- ...d_server_certificates_stored.metadata.json | 9 +- ...m_no_expired_server_certificates_stored.py | 2 +- .../iam_no_root_access_key.metadata.json | 20 +- .../iam_no_root_access_key.py | 2 +- ...words_within_90_days_or_less.metadata.json | 22 +- ...xpires_passwords_within_90_days_or_less.py | 2 +- ...am_password_policy_lowercase.metadata.json | 20 +- .../iam_password_policy_lowercase.py | 2 +- ...ord_policy_minimum_length_14.metadata.json | 20 +- .../iam_password_policy_minimum_length_14.py | 2 +- .../iam_password_policy_number.metadata.json | 20 +- .../iam_password_policy_number.py | 2 +- ...iam_password_policy_reuse_24.metadata.json | 20 +- .../iam_password_policy_reuse_24.py | 2 +- 
.../iam_password_policy_symbol.metadata.json | 20 +- .../iam_password_policy_symbol.py | 2 +- ...am_password_policy_uppercase.metadata.json | 20 +- .../iam_password_policy_uppercase.py | 2 +- ..._allows_privilege_escalation.metadata.json | 9 +- .../iam_policy_allows_privilege_escalation.py | 2 +- ...ached_only_to_group_or_roles.metadata.json | 20 +- ..._policy_attached_only_to_group_or_roles.py | 6 +- ...no_administrative_privileges.metadata.json | 80 +- ...iam_policy_no_administrative_privileges.py | 2 +- ...am_root_hardware_mfa_enabled.metadata.json | 20 +- .../iam_root_hardware_mfa_enabled.py | 2 +- .../iam_root_mfa_enabled.metadata.json | 20 +- .../iam_root_mfa_enabled.py | 2 +- ...am_rotate_access_key_90_days.metadata.json | 20 +- .../iam_rotate_access_key_90_days.py | 2 +- .../iam_support_role_created.metadata.json | 81 +- .../iam_support_role_created.py | 2 +- ...am_user_hardware_mfa_enabled.metadata.json | 9 +- .../iam_user_hardware_mfa_enabled.py | 2 +- ...r_mfa_enabled_console_access.metadata.json | 9 +- .../iam_user_mfa_enabled_console_access.py | 2 +- ..._no_setup_initial_access_key.metadata.json | 80 +- .../iam_user_no_setup_initial_access_key.py | 6 +- ...m_user_two_active_access_key.metadata.json | 9 +- .../iam_user_two_active_access_key.py | 2 +- .../kms_cmk_are_used.metadata.json | 7 +- .../kms/kms_cmk_are_used/kms_cmk_are_used.py | 2 +- .../kms_cmk_rotation_enabled.metadata.json | 7 +- .../kms_cmk_rotation_enabled.py | 2 +- ..._key_not_publicly_accessible.metadata.json | 7 +- .../kms_key_not_publicly_accessible.py | 2 +- .../macie_is_enabled.metadata.json | 7 +- .../macie_is_enabled/macie_is_enabled.py | 2 +- ...omains_audit_logging_enabled.metadata.json | 68 +- ...h_service_domains_audit_logging_enabled.py | 2 +- ...s_cloudwatch_logging_enabled.metadata.json | 68 +- ...vice_domains_cloudwatch_logging_enabled.py | 2 +- ...s_encryption_at_rest_enabled.metadata.json | 69 +- ...vice_domains_encryption_at_rest_enabled.py | 2 +- ...ttps_communications_enforced.metadata.json | 69 +- ...e_domains_https_communications_enforced.py | 2 +- ...ternal_user_database_enabled.metadata.json | 68 +- ..._domains_internal_user_database_enabled.py | 2 +- ...e_to_node_encryption_enabled.metadata.json | 69 +- ...domains_node_to_node_encryption_enabled.py | 2 +- ...ains_not_publicly_accessible.metadata.json | 68 +- ...service_domains_not_publicly_accessible.py | 2 +- ...est_service_software_version.metadata.json | 68 +- ..._to_the_latest_service_software_version.py | 2 +- ...to_authentication_for_kibana.metadata.json | 68 +- ...s_use_cognito_authentication_for_kibana.py | 2 +- .../rds_instance_backup_enabled.metadata.json | 3 +- .../rds_instance_backup_enabled.py | 2 +- ...instance_deletion_protection.metadata.json | 3 +- .../rds_instance_deletion_protection.py | 2 +- ..._enhanced_monitoring_enabled.metadata.json | 3 +- ...ds_instance_enhanced_monitoring_enabled.py | 2 +- ..._integration_cloudwatch_logs.metadata.json | 3 +- ...ds_instance_integration_cloudwatch_logs.py | 2 +- ...inor_version_upgrade_enabled.metadata.json | 3 +- ..._instance_minor_version_upgrade_enabled.py | 2 +- .../rds_instance_multi_az.metadata.json | 3 +- .../rds_instance_multi_az.py | 2 +- ...ds_instance_no_public_access.metadata.json | 3 +- .../rds_instance_no_public_access.py | 2 +- ...s_instance_storage_encrypted.metadata.json | 3 +- .../rds_instance_storage_encrypted.py | 2 +- .../rds_snapshots_public_access.metadata.json | 3 +- .../rds_snapshots_public_access.py | 4 +- ...dshift_cluster_audit_logging.metadata.json | 65 
+- .../redshift_cluster_audit_logging.py | 2 +- ...t_cluster_automated_snapshot.metadata.json | 65 +- .../redshift_cluster_automated_snapshot.py | 2 +- ...t_cluster_automatic_upgrades.metadata.json | 65 +- .../redshift_cluster_automatic_upgrades.py | 2 +- ...dshift_cluster_public_access.metadata.json | 65 +- .../redshift_cluster_public_access.py | 2 +- ...s_privacy_protection_enabled.metadata.json | 3 +- ...te53_domains_privacy_protection_enabled.py | 2 +- ...domains_transferlock_enabled.metadata.json | 3 +- .../route53_domains_transferlock_enabled.py | 2 +- ...s_cloudwatch_logging_enabled.metadata.json | 3 +- ...hosted_zones_cloudwatch_logging_enabled.py | 2 +- ...t_level_public_access_blocks.metadata.json | 3 +- .../s3_account_level_public_access_blocks.py | 2 +- .../s3_bucket_acl_prohibited.metadata.json | 7 +- .../s3_bucket_acl_prohibited.py | 2 +- ...s3_bucket_default_encryption.metadata.json | 7 +- .../s3_bucket_default_encryption.py | 2 +- .../s3_bucket_no_mfa_delete.metadata.json | 7 +- .../s3_bucket_no_mfa_delete.py | 2 +- .../s3_bucket_object_versioning.metadata.json | 7 +- .../s3_bucket_object_versioning.py | 2 +- ...t_policy_public_write_access.metadata.json | 7 +- .../s3_bucket_policy_public_write_access.py | 2 +- .../s3_bucket_public_access.metadata.json | 7 +- .../s3_bucket_public_access.py | 4 +- ...cket_secure_transport_policy.metadata.json | 7 +- .../s3_bucket_secure_transport_policy.py | 2 +- ...erver_access_logging_enabled.metadata.json | 7 +- ...s3_bucket_server_access_logging_enabled.py | 2 +- ...ls_network_isolation_enabled.metadata.json | 65 +- ...emaker_models_network_isolation_enabled.py | 2 +- ...dels_vpc_settings_configured.metadata.json | 65 +- ...agemaker_models_vpc_settings_configured.py | 2 +- ..._instance_encryption_enabled.metadata.json | 65 +- ...er_notebook_instance_encryption_enabled.py | 2 +- ...nstance_root_access_disabled.metadata.json | 65 +- ..._notebook_instance_root_access_disabled.py | 2 +- ...ance_vpc_settings_configured.metadata.json | 65 +- ...tebook_instance_vpc_settings_configured.py | 2 +- ...t_internet_access_configured.metadata.json | 65 +- ...thout_direct_internet_access_configured.py | 2 +- ...container_encryption_enabled.metadata.json | 65 +- ..._jobs_intercontainer_encryption_enabled.py | 2 +- ...bs_network_isolation_enabled.metadata.json | 65 +- ...training_jobs_network_isolation_enabled.py | 2 +- ...nd_output_encryption_enabled.metadata.json | 65 +- ...bs_volume_and_output_encryption_enabled.py | 2 +- ...jobs_vpc_settings_configured.metadata.json | 65 +- ...r_training_jobs_vpc_settings_configured.py | 2 +- ...r_automatic_rotation_enabled.metadata.json | 5 +- ...cretsmanager_automatic_rotation_enabled.py | 2 +- .../securityhub_enabled.metadata.json | 7 +- .../securityhub_enabled.py | 2 +- ...on_in_associated_elastic_ips.metadata.json | 5 +- ...ed_protection_in_associated_elastic_ips.py | 4 +- ...on_in_classic_load_balancers.metadata.json | 7 +- ...ed_protection_in_classic_load_balancers.py | 4 +- ..._in_cloudfront_distributions.metadata.json | 5 +- ..._protection_in_cloudfront_distributions.py | 4 +- ...ction_in_global_accelerators.metadata.json | 5 +- ...anced_protection_in_global_accelerators.py | 4 +- ...ternet_facing_load_balancers.metadata.json | 5 +- ...ction_in_internet_facing_load_balancers.py | 6 +- ...tion_in_route53_hosted_zones.metadata.json | 5 +- ...nced_protection_in_route53_hosted_zones.py | 6 +- ...s_encryption_at_rest_enabled.metadata.json | 65 +- ...s_topics_kms_encryption_at_rest_enabled.py | 2 +- 
...pics_not_publicly_accessible.metadata.json | 65 +- .../sns_topics_not_publicly_accessible.py | 2 +- ...eues_not_publicly_accessible.metadata.json | 65 +- .../sqs_queues_not_publicly_accessible.py | 2 +- ...rver_side_encryption_enabled.metadata.json | 65 +- ...s_queues_server_side_encryption_enabled.py | 2 +- providers/aws/services/sqs/sqs_service.py | 13 +- .../ssm_document_secrets.metadata.json | 3 +- .../ssm_document_secrets.py | 2 +- .../ssm_documents_set_as_public.metadata.json | 3 +- .../ssm_documents_set_as_public.py | 2 +- ...m_managed_compliant_patching.metadata.json | 3 +- .../ssm_managed_compliant_patching.py | 2 +- ...dadvisor_errors_and_warnings.metadata.json | 65 +- .../trustedadvisor_errors_and_warnings.py | 2 +- ...connections_trust_boundaries.metadata.json | 7 +- ...c_endpoint_connections_trust_boundaries.py | 4 +- ..._principals_trust_boundaries.metadata.json | 7 +- ...ces_allowed_principals_trust_boundaries.py | 4 +- .../vpc_flow_logs_enabled.metadata.json | 7 +- .../vpc_flow_logs_enabled.py | 2 +- ..._tables_with_least_privilege.metadata.json | 7 +- ...ing_routing_tables_with_least_privilege.py | 2 +- ...es_volume_encryption_enabled.metadata.json | 65 +- .../workspaces_volume_encryption_enabled.py | 2 +- prowler | 81 +- 515 files changed, 6018 insertions(+), 5614 deletions(-) rename compliance/{ => aws}/cis_1.4_aws.json (100%) rename compliance/{ => aws}/cis_1.5_aws.json (100%) create mode 100644 compliance/aws/ens_rd2022_aws.json delete mode 100644 compliance/ens_rd2022_aws.json create mode 100644 lib/check/compliance.py create mode 100644 lib/check/compliance_models.py create mode 100644 lib/check/fixtures/aws/cis_v1.4_aws.json create mode 100644 lib/check/fixtures/aws/ens_v3_aws.json rename providers/aws/services/ec2/{ec2_network_acls_allow_ingress_any_port => ec2_networkacl_allow_ingress_any_port}/__init__.py (100%) rename providers/aws/services/ec2/{ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port.metadata.json => ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port.metadata.json} (83%) rename providers/aws/services/ec2/{ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port.py => ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port.py} (90%) rename providers/aws/services/ec2/{ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port_test.py => ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port_test.py} (72%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 71c6fddd..ee56761c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,28 +1,34 @@ repos: -## GENERAL + ## GENERAL - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.3.0 hooks: - id: check-merge-conflict - id: check-yaml - args: ['--unsafe'] + args: ["--unsafe"] - id: check-json - id: end-of-file-fixer - id: trailing-whitespace - id: no-commit-to-branch - id: pretty-format-json - args: ['--autofix', --no-sort-keys, --no-ensure-ascii] -## BASH + args: ["--autofix", --no-sort-keys, --no-ensure-ascii] + + ## BASH - repo: https://github.com/koalaman/shellcheck-precommit rev: v0.8.0 hooks: - - id: shellcheck -## PYTHON + - id: shellcheck + ## PYTHON - repo: https://github.com/myint/autoflake rev: v1.7.7 hooks: - id: autoflake - args: ['--in-place', '--remove-all-unused-imports', '--remove-unused-variable'] + args: + [ + "--in-place", + "--remove-all-unused-imports", + "--remove-unused-variable", + ] - repo: 
https://github.com/timothycrosley/isort rev: 5.10.1 @@ -40,9 +46,7 @@ repos: hooks: - id: flake8 exclude: contrib - args: [ - "--ignore=E266,W503,E203,E501,W605" - ] + args: ["--ignore=E266,W503,E203,E501,W605"] - repo: https://github.com/haizaar/check-pipfile-lock rev: v0.0.5 @@ -63,18 +67,18 @@ repos: - id: bandit name: bandit - description: 'Bandit is a tool for finding common security issues in Python code' + description: "Bandit is a tool for finding common security issues in Python code" entry: bash -c 'bandit -q -lll -x '*_test.py,./contrib/' -r .' language: system - id: safety name: safety - description: 'Safety is a tool that checks your installed dependencies for known security vulnerabilities' + description: "Safety is a tool that checks your installed dependencies for known security vulnerabilities" entry: bash -c 'safety check' language: system - id: vulture name: vulture - description: 'Vulture finds unused code in Python programs.' + description: "Vulture finds unused code in Python programs." entry: bash -c 'vulture --exclude "contrib" --min-confidence 100 .' language: system diff --git a/check_sample.metadata.json b/check_sample.metadata.json index 55a070d0..8a7d5785 100644 --- a/check_sample.metadata.json +++ b/check_sample.metadata.json @@ -39,17 +39,5 @@ "othercheck3", "othercheck4" ], - "Notes": "additional information", - "Compliance": [ - { - "Control": [ - "4.1" - ], - "Framework": "CIS-AWS", - "Group": [ - "level2" - ], - "Version": "1.4" - } - ] + "Notes": "additional information" } diff --git a/compliance/cis_1.4_aws.json b/compliance/aws/cis_1.4_aws.json similarity index 100% rename from compliance/cis_1.4_aws.json rename to compliance/aws/cis_1.4_aws.json diff --git a/compliance/cis_1.5_aws.json b/compliance/aws/cis_1.5_aws.json similarity index 100% rename from compliance/cis_1.5_aws.json rename to compliance/aws/cis_1.5_aws.json diff --git a/compliance/aws/ens_rd2022_aws.json b/compliance/aws/ens_rd2022_aws.json new file mode 100644 index 00000000..11c1461c --- /dev/null +++ b/compliance/aws/ens_rd2022_aws.json @@ -0,0 +1,1636 @@ +{ + "Framework": "ENS", + "Version": "RD2022", + "Provider": "AWS", + "Requirements": [ + { + "Id": "op.acc.1.aws.iam.2", + "Description": "Proveedor de identidad centralizado", + "Attributes": [ + { + "IdGrupoControl": "op.acc.1", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Es muy recomendable la utilización de un proveedor de identidades que permita administrar las identidades en un lugar centralizado, en vez de utilizar IAM para ello.", + "Nivel": "bajo", + "Tipo": "recomendacion", + "Dimensiones": [ + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_check_saml_providers_sts" + ] + }, + { + "Id": "op.acc.2.aws.iam.4", + "Description": "Requisitos de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.acc.2", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Se deberá delegar en cuentas administradoras la administración de la organización, dejando la cuenta maestra sin uso y con las medidas de seguridad pertinentes.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_avoid_root_usage" + ] + }, + { + "Id": "op.acc.3.r1.aws.iam.1", + "Description": "Segregación rigurosa", + "Attributes": [ + { + "IdGrupoControl": "op.acc.3.r1", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "En 
caso de ser de aplicación, la segregación deberá tener en cuenta la separación de las funciones de configuración y mantenimiento y de auditoría de cualquier otra.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_support_role_created" + ] + }, + { + "Id": "op.acc.4.aws.iam.1", + "Description": "Proceso de gestión de derechos de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.acc.4", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Las políticas IAM deben permitir sólo los privilegios necesarios para cada rol. Se recomienda comenzar con el mínimo nivel de permisos e ir añadiendo permisos adicionales según vaya surgiendo la necesidad en lugar de comenzar con permisos administrativos.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "sqs_queues_not_publicly_accessible", + "s3_bucket_policy_public_write_access", + "awslambda_function_not_publicly_accessible", + "iam_no_custom_policy_permissive_role_assumption", + "cloudwatch_cross_account_sharing_disabled", + "awslambda_function_url_public", + "awslambda_function_url_cors_policy", + "iam_policy_allows_privilege_escalation", + "iam_policy_no_administrative_privileges" + ] + }, + { + "Id": "op.acc.4.aws.iam.9", + "Description": "Proceso de gestión de derechos de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.acc.4", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Se restringirá todo acceso a las acciones especificadas para el usuario root de una cuenta.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_no_root_access_key", + "iam_avoid_root_usage" + ] + }, + { + "Id": "op.acc.4.aws.iam.11", + "Description": "Proceso de gestión de derechos de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.acc.4", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Se configurarán diferentes permisos a las cuentas de usuario, limitando la utilización de la cuenta “root” para tareas específicas que necesiten un nivel de privilegios elevado, esta configuración debe entenderse como un mecanismo para impedir que el trabajo directo con usuarios con privilegios de administrador repercuta negativamente en la seguridad, a acometer todas las acciones con el máximo privilegio cuando este no es siempre requerido.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_policy_no_administrative_privileges" + ] + }, + { + "Id": "op.acc.4.aws.sys.1", + "Description": "Proceso de gestión de derechos de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.acc.4", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Habilitar systems manager automation para evitar acceso remoto humano a tareas automatizables.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "ec2_instance_managed_by_ssm" + ] + }, + { + "Id": "op.acc.6.aws.iam.1", + "Description": "Mecanismo de autenticación (usuarios de la organización)", + "Attributes": [ + { + 
"IdGrupoControl": "op.acc.6", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Evitar el uso permanente de múltiples claves de acceso para un mismo usuario IAM.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_user_two_active_access_key" + ] + }, + { + "Id": "op.acc.6.aws.iam.2", + "Description": "Mecanismo de autenticación (usuarios de la organización)", + "Attributes": [ + { + "IdGrupoControl": "op.acc.6", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Las claves de acceso deberán rotarse cada 90 días o menos.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_disable_90_days_credentials", + "iam_rotate_access_key_90_days" + ] + }, + { + "Id": "op.acc.6.aws.iam.3", + "Description": "Mecanismo de autenticación (usuarios de la organización)", + "Attributes": [ + { + "IdGrupoControl": "op.acc.6", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Deberá habilitarse el vencimiento de las credenciales de los usuarios. (Bien a través de la política de contraseñas de IAM o del proveedor de identidades federado).", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_password_policy_expires_passwords_within_90_days_or_less", + "iam_disable_90_days_credentials", + "iam_rotate_access_key_90_days" + ] + }, + { + "Id": "op.acc.6.aws.iam.4", + "Description": "Mecanismo de autenticación (usuarios de la organización)", + "Attributes": [ + { + "IdGrupoControl": "op.acc.6", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Se deberá evitar la asignación por defecto de claves de acceso para todos los usuarios que tengan acceso a la consola. 
Para cumplir con este requisito, se recomienda revisar qué usuarios se encuentran dados de alta en la cuenta de AWS y disponen de acceso a la consola de administración y evitar la asignación de claves de acceso cuando no son necesarias.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_user_no_setup_initial_access_key" + ] + }, + { + "Id": "op.acc.6.r1.aws.iam.1", + "Description": "Contraseñas", + "Attributes": [ + { + "IdGrupoControl": "op.acc.6.r1", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Las contraseñas de los usuarios deberán tener normas de complejidad mínima y robustez.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_password_policy_lowercase", + "iam_password_policy_minimum_length_14", + "iam_password_policy_number", + "iam_password_policy_reuse_24", + "iam_password_policy_symbol", + "iam_password_policy_uppercase" + ] + }, + { + "Id": "op.acc.6.r2.aws.iam.1", + "Description": "Contraseña + otro factor de autenticación", + "Attributes": [ + { + "IdGrupoControl": "op.acc.6.r2", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "MFA deberá estar habilitado para todas las cuentas que tengan contraseña para acceder a la consola, incluyendo el usuario root.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_user_mfa_enabled_console_access" + ] + }, + { + "Id": "op.acc.6.r4.aws.iam.1", + "Description": "Certificados en dispositvo físico", + "Attributes": [ + { + "IdGrupoControl": "op.acc.6.r4", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Habilitar los dispositivos MFA físicos para todos los usuarios IAM mediante la consola, línea de comandos o la propia API de IAM. Del mismo modo, el uso de estos certificados deberá estar protegido por un segundo factor de tipo PIN o biométrico.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_user_mfa_enabled_console_access", + "iam_root_hardware_mfa_enabled" + ] + }, + { + "Id": "op.acc.6.r4.aws.ct.1", + "Description": "Certificados en dispositvo físico", + "Attributes": [ + { + "IdGrupoControl": "op.acc.6.r4", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Habilitar los dispositivos MFA físicos para todos los usuarios IAM mediante la consola, línea de comandos o la propia API de IAM. 
Del mismo modo, el uso de estos certificados deberá estar protegido por un segundo factor de tipo PIN o biométrico.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_administrator_access_with_mfa", + "iam_root_mfa_enabled", + "iam_user_mfa_enabled_console_access" + ] + }, + { + "Id": "op.acc.6.r5.aws.iam.1", + "Description": "Registro", + "Attributes": [ + { + "IdGrupoControl": "op.acc.6.r5", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Para registrar los intentos de acceso, se deberá habilitar CloudTrail en todas las regiones y activar el registro de acceso de usuarios.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "cloudtrail_multi_region_enabled" + ] + }, + { + "Id": "op.acc.6.r7.aws.iam.1", + "Description": "Suspensión por no utilización", + "Attributes": [ + { + "IdGrupoControl": "op.acc.6.r7", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Activar la deshabilitación de las credenciales de los usuarios IAM que no hayan sido empleadas durante un periodo de tiempo (o bien, se deberá establecer la deshabilitación en el proveedor de identidades).", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_disable_30_days_credentials", + "iam_disable_90_days_credentials" + ] + }, + { + "Id": "op.acc.6.r8.aws.iam.1", + "Description": "Doble factor para acceso desde o a través de zonas no controladas", + "Attributes": [ + { + "IdGrupoControl": "op.acc.6.r8", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Se deberá emplear como mecanismo de autenticación o bien una contraseña más otro factor de autenticación, o bien un certificado cualificado (con o sin soporte físico) protegido por un doble factor de autenticación.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ] + } + ], + "Checks": [ + "iam_user_mfa_enabled_console_access" + ] + }, + { + "Id": "op.exp.1.aws.cfg.1", + "Description": "Inventario de activos", + "Attributes": [ + { + "IdGrupoControl": "op.exp.1", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "En lo referente al inventariado de activos, asegurar que AWS Config está habilitado en todas las regiones y utilizar la herramienta para obtener una vista de los recursos existentes en las cuentas de AWS.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "config_recorder_all_regions_enabled" + ] + }, + { + "Id": "op.exp.1.aws.sys.1", + "Description": "Inventario de activos", + "Attributes": [ + { + "IdGrupoControl": "op.exp.1", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "En el ámbito del software desplegado en las instancias de EC2, habilitar AWS System Manager Inventory para todo el entorno de EC2 en caso de no utilizar herramientas de terceros.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + 
"ec2_instance_managed_by_ssm", + "ssm_managed_compliant_patching" + ] + }, + { + "Id": "op.exp.3.aws.cfg.1", + "Description": "Gestión de la configuración de seguridad", + "Attributes": [ + { + "IdGrupoControl": "op.exp.3", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "El cumplimiento de los requisitos se puede apoyar en la utilización de los servicios Config, Config Rules y Conformance Packs para identificar líneas base de configuración para evaluar si los recursos de AWS se ajustan a las prácticas autorizadas por la organización.", + "Nivel": "bajo", + "Tipo": "recomendacion", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "config_recorder_all_regions_enabled" + ] + }, + { + "Id": "op.exp.3.r3.aws.cfg.1", + "Description": "Copias de seguridad", + "Attributes": [ + { + "IdGrupoControl": "op.exp.3.r3", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "La entidad usuaria puede consultar el histórico de configuraciones de recursos en AWS Config.", + "Nivel": "bajo", + "Tipo": "recomendacion", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "config_recorder_all_regions_enabled" + ] + }, + { + "Id": "op.exp.4.aws.sys.2", + "Description": "Mantenimiento y actualizaciones de seguridad", + "Attributes": [ + { + "IdGrupoControl": "op.exp.4", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Utilizar AWS Systems Manager Patch Manager para planificar y gestionar la aplicación de parches minimizando los riesgos asociados a tener instancias con software desactualizado y expuesto a vulnerabilidades conocidas.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "ssm_managed_compliant_patching" + ] + }, + { + "Id": "op.exp.4.r2.aws.sys.1", + "Description": "Prevención de fallos", + "Attributes": [ + { + "IdGrupoControl": "op.exp.4.r2", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Utilizar la solución AWS Systems Manager Automation para automatizar las tareas de corrección en servicios de AWS como EC2 y RDS.", + "Nivel": "bajo", + "Tipo": "recomendacion", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "ec2_instance_managed_by_ssm", + "ssm_managed_compliant_patching" + ] + }, + { + "Id": "op.exp.5.aws.ct.1", + "Description": "Gestión de cambios", + "Attributes": [ + { + "IdGrupoControl": "op.exp.5", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Asegurar que CloudTrail esté activo para todas las regiones.", + "Nivel": "bajo", + "Tipo": "recomendacion", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "cloudtrail_multi_region_enabled" + ] + }, + { + "Id": "op.exp.6.aws.gd.1", + "Description": "Protección frente a código dañino", + "Attributes": [ + { + "IdGrupoControl": "op.exp.6", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Activar la protección contra software malintencionado de GuardDuty en todas las regiones.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + 
"integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "guardduty_is_enabled" + ] + }, + { + "Id": "op.exp.8.aws.ct.1", + "Description": "Registro de actividad", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Habilitar la herramienta CloudTrail en todas las regiones. Este serviio está habilitado por defecto cuando se crea una nueva cuenta, pero es posible deshabilitarlo.", + "Nivel": "alto", + "Tipo": "medida", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudtrail_multi_region_enabled" + ] + }, + { + "Id": "op.exp.8.aws.ct.2", + "Description": "Registro de actividad", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Establecer un filtro de métricas desde AWS CloudWatch para detectar cambios en las configuraciones de CloudTrail", + "Nivel": "alto", + "Tipo": "medida", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled" + ] + }, + { + "Id": "op.exp.8.aws.ct.3", + "Description": "Registro de actividad", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Habilitar la validación de archivos en todos los trails, evitando así que estos se vean modificados o eliminados.", + "Nivel": "alto", + "Tipo": "medida", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudtrail_log_file_validation_enabled" + ] + }, + { + "Id": "op.exp.8.aws.ct.4", + "Description": "Registro de actividad", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Habilitar la entrega continua de eventos de CloudTrail a un bucket S3 dedicado con el fin de unificar los archivos de registro.", + "Nivel": "alto", + "Tipo": "medida", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudtrail_logs_s3_bucket_is_not_publicly_accessible", + "cloudtrail_s3_dataevents_write_enabled" + ] + }, + { + "Id": "op.exp.8.aws.ct.5", + "Description": "Registro de actividad", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Se deberán habilitar alertas para los siguientes eventos:\n* Llamadas no permitidas a la API\n* Accesos no permitidos a la consola\n* Todos los intentos de acceso sin el correcto uso de MFA\n* Toda la actividad realizada sobre y por la cuenta root\n* Cualquier cambio en las políticas IAM", + "Nivel": "alto", + "Tipo": "medida", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudwatch_log_metric_filter_unauthorized_api_calls", + "cloudwatch_log_metric_filter_authentication_failures", + "cloudwatch_log_metric_filter_sign_in_without_mfa", + "cloudwatch_log_metric_filter_root_usage", + "cloudwatch_log_metric_filter_policy_changes" + ] + }, + { + "Id": "op.exp.8.r1.aws.ct.2", + "Description": "Revisión de los registros", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8.r1", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Configurar la herramienta CloudTrail de manera que realice el registro de eventos de administración, eventos de datos y eventos anómalos (insights).", + "Nivel": "pytec", + "Tipo": "refuerzo", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + 
"Checks": [ + "cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled", + "cloudtrail_s3_dataevents_write_enabled", + "cloudtrail_s3_dataevents_read_enabled" + ] + }, + { + "Id": "op.exp.8.r1.aws.ct.4", + "Description": "Revisión de los registros", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8.r1", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Registrar los eventos de lectura y escritura de datos.", + "Nivel": "pytec", + "Tipo": "refuerzo", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudtrail_s3_dataevents_write_enabled", + "cloudtrail_s3_dataevents_read_enabled" + ] + }, + { + "Id": "op.exp.8.r1.aws.ct.6", + "Description": "Revisión de los registros", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8.r1", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Habilitar la entrega continua de eventos de CloudTrail a un bucket de Amazon S3", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudtrail_logs_s3_bucket_access_logging_enabled" + ] + }, + { + "Id": "op.exp.8.r1.aws.ct.7", + "Description": "Revisión de los registros", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8.r1", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Integrar CloudTrail con el servicio CloudWatch Logs", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudtrail_cloudwatch_logging_enabled" + ] + }, + { + "Id": "op.exp.8.r3.aws.cw.1", + "Description": "Retención de registros", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8.r3", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Ejecutar la acción PutRetentionPolicy de Amazon CloudWatch, permitiendo así establecer la retención del grupo de registros especificado y configurar el número de días durante los cuales se conservarán los eventos de registro en el grupo seleccionado de acuerdo con el documento de seguridad correspondiente. 
Paralelamente, se debe definir un periodo de retención para los datos almacenados en CloudTrail Lakes.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudwatch_log_group_retention_policy_specific_days_enabled" + ] + }, + { + "Id": "op.exp.8.r4.aws.ct.3", + "Description": "Control de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8.r4", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Utilizar una política de bucket para restringir el acceso de forma pública e imponer restricciones sobre cuáles de los usuarios pueden eliminar objetos de Amazon S3.", + "Nivel": "bajo", + "Tipo": "refuerzo", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "s3_bucket_public_access", + "cloudtrail_logs_s3_bucket_is_not_publicly_accessible", + "s3_bucket_policy_public_write_access" + ] + }, + { + "Id": "op.exp.8.r4.aws.ct.5", + "Description": "Control de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8.r4", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Configurar los archivos de logs de AWS CloudTrail para aprovechar el cifrado del lado del servidor (SSE – Server Side Encryption) y las claves maestras creadas por el cliente (CMK de KMS).", + "Nivel": "pytec", + "Tipo": "refuerzo", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudtrail_kms_encryption_enabled" + ] + }, + { + "Id": "op.exp.8.r4.aws.ct.6", + "Description": "Control de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8.r4", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "El almacén de logs de CloudTrail no debería ser accesible de forma pública", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudtrail_logs_s3_bucket_access_logging_enabled" + ] + }, + { + "Id": "op.exp.8.r4.aws.ct.7", + "Description": "Control de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8.r4", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "El almacén de logs de CloudTrail no debería ser accesible de forma pública (ACLs)", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudtrail_logs_s3_bucket_is_not_publicly_accessible" + ] + }, + { + "Id": "op.exp.8.r4.aws.ct.8", + "Description": "Control de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8.r4", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Cifrado de los trails con KMS", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "trazabilidad" + ] + } + ], + "Checks": [ + "cloudtrail_kms_encryption_enabled" + ] + }, + { + "Id": "op.exp.10.aws.cmk.6", + "Description": "Protección de claves criptográficas", + "Attributes": [ + { + "IdGrupoControl": "op.exp.10", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Activar la rotación de las claves CMK.", + "Nivel": "pytec", + "Tipo": "medida", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "kms_cmk_rotation_enabled" + ] + }, + { + "Id": "op.exp.10.aws.cmk.7", + "Description": "Protección de claves criptográficas", + "Attributes": [ + { + "IdGrupoControl": "op.exp.10", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": " Para el archivo posterior a la explotación y destrucción de las 
claves se debe deshabilitar todas las claves CMK que no estén en uso.", + "Nivel": "pytec", + "Tipo": "medida", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk" + ] + }, + { + "Id": "op.exp.10.aws.cmk.8", + "Description": "Protección de claves criptográficas", + "Attributes": [ + { + "IdGrupoControl": "op.exp.10", + "Marco": "operacional", + "Categoria": "explotación", + "DescripcionControl": "Eliminar las claves deshabilitadas que no estén en uso y no mantengan ningún objeto o recurso cifrado, completando el ciclo de vida de la clave.", + "Nivel": "pytec", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk" + ] + }, + { + "Id": "op.mon.1.aws.gd.1", + "Description": "Detección de intrusión", + "Attributes": [ + { + "IdGrupoControl": "op.mon.1", + "Marco": "operacional", + "Categoria": "monitorización del sistema", + "DescripcionControl": "En ausencia de otras herramientas de terceros, habilitar Amazon GuarDuty para la detección de amenazas e intrusiones.", + "Nivel": "alto", + "Tipo": "medida", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "guardduty_is_enabled" + ] + }, + { + "Id": "op.mon.1.aws.ct.1", + "Description": "Detección de intrusión", + "Attributes": [ + { + "IdGrupoControl": "op.mon.1", + "Marco": "operacional", + "Categoria": "monitorización del sistema", + "DescripcionControl": "Activar el servicio de eventos AWS CloudTrail para todas las regiones.", + "Nivel": "alto", + "Tipo": "medida", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "cloudtrail_multi_region_enabled" + ] + }, + { + "Id": "op.mon.1.aws.flow.1", + "Description": "Detección de intrusión", + "Attributes": [ + { + "IdGrupoControl": "op.mon.1", + "Marco": "operacional", + "Categoria": "monitorización del sistema", + "DescripcionControl": "Activar el servicio VPC FlowLogs.", + "Nivel": "alto", + "Tipo": "medida", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "vpc_flow_logs_enabled" + ] + }, + { + "Id": "op.mon.1.aws.gd.2", + "Description": "Detección de intrusión", + "Attributes": [ + { + "IdGrupoControl": "op.mon.1", + "Marco": "operacional", + "Categoria": "monitorización del sistema", + "DescripcionControl": "Deberá habilitarse Amazon GuardDuty para todas las regiones tanto en la cuenta raíz como en las cuentas miembro de un entorno multi-cuenta.", + "Nivel": "alto", + "Tipo": "medida", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "guardduty_is_enabled" + ] + }, + { + "Id": "op.mon.2.aws.sh.1", + "Description": "Sistema de métricas", + "Attributes": [ + { + "IdGrupoControl": "op.mon.2", + "Marco": "operacional", + "Categoria": "monitorización del sistema", + "DescripcionControl": "Utilizar Security Hub para obtener una vista consolidada de los hallazgos de seguridad en los servicios de AWS habilitados.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + 
"integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "securityhub_enabled" + ] + }, + { + "Id": "mp.com.1.aws.sg.1", + "Description": "Perímetro seguro", + "Attributes": [ + { + "IdGrupoControl": "mp.com.1", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Asegurar que el Security Group restrinja todo el tráfico. Para ello, se deberán agregar las reglas del Security Group que se aplica por defecto cuando se crea una VPC.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "ec2_securitygroup_default_restrict_traffic", + "ec2_securitygroup_from_launch_wizard" + ] + }, + { + "Id": "mp.com.1.aws.sg.2", + "Description": "Perímetro seguro", + "Attributes": [ + { + "IdGrupoControl": "mp.com.1", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Evitar la existencia de Security Groups que dejen abierto todo el tráfico entrante.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "ec2_securitygroup_in_use_without_ingress_filtering" + ] + }, + { + "Id": "mp.com.1.aws.sg.3", + "Description": "Perímetro seguro", + "Attributes": [ + { + "IdGrupoControl": "mp.com.1", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Evitar tener un repositorio de Security Groups que no estén siendo usados.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "ec2_securitygroup_not_used" + ] + }, + { + "Id": "mp.com.1.aws.elb.1", + "Description": "Perímetro seguro", + "Attributes": [ + { + "IdGrupoControl": "mp.com.1", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Habilitar TLS en los balanceadores de carga ELB ", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "elb_ssl_listeners" + ] + }, + { + "Id": "mp.com.1.aws.elb.2", + "Description": "Perímetro seguro", + "Attributes": [ + { + "IdGrupoControl": "mp.com.1", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Evitar el uso de protocolos de cifrado inseguros para las políticas de seguridad de ELB. Esto podría dejar la conexión SSL entre balanceadores y clientes vulnerables a ser explotados. En particular deberá evitarse el uso de TLS 1.0. 
", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "elb_insecure_ssl_ciphers" + ] + }, + { + "Id": "mp.com.1.aws.s3.1", + "Description": "Perímetro seguro", + "Attributes": [ + { + "IdGrupoControl": "mp.com.1", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Asegurar que los Buckets S3 de almacenamiento apliquen cifrado para la transferencia de datos empleando Secure Sockets Layer (SSL)", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "s3_bucket_secure_transport_policy" + ] + }, + { + "Id": "mp.com.1.aws.cf.1", + "Description": "Perímetro seguro", + "Attributes": [ + { + "IdGrupoControl": "mp.com.1", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Asegurar que la distribución entre frontales CloudFront y sus orígenes únicamente emplee tráfico HTTPs ", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "cloudfront_distributions_https_enabled" + ] + }, + { + "Id": "mp.com.3.aws.elb.1", + "Description": "Protección de la integridad y de la autenticidad", + "Attributes": [ + { + "IdGrupoControl": "mp.com.3", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Habilitar TLS en los balanceadores de carga ELB.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "integridad", + "autenticidad" + ] + } + ], + "Checks": [ + "elbv2_insecure_ssl_ciphers" + ] + }, + { + "Id": "mp.com.3.aws.elb.2", + "Description": "Protección de la integridad y de la autenticidad", + "Attributes": [ + { + "IdGrupoControl": "mp.com.3", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Evitar el uso de protocolos de cifrado inseguros en la conexión TLS entre clientes y balanceadores de carga. 
En particular, se deberá evitar el uso de TLS 1.0.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "integridad", + "autenticidad" + ] + } + ], + "Checks": [ + "elbv2_insecure_ssl_ciphers" + ] + }, + { + "Id": "mp.com.3.aws.s3.1", + "Description": "Protección de la integridad y de la autenticidad", + "Attributes": [ + { + "IdGrupoControl": "mp.com.3", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Asegurar que los Buckets de almacenamiento S3 apliquen cifrado para la transferencia de datos empleando TLS.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "integridad", + "autenticidad" + ] + } + ], + "Checks": [ + "s3_bucket_secure_transport_policy" + ] + }, + { + "Id": "mp.com.3.aws.cf.1", + "Description": "Protección de la integridad y de la autenticidad", + "Attributes": [ + { + "IdGrupoControl": "mp.com.3", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Asegurar que la distribución entre frontales CloudFront y sus orígenes únicamente emplee tráfico HTTPS.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "integridad", + "autenticidad" + ] + } + ], + "Checks": [ + "cloudfront_distributions_https_enabled" + ] + }, + { + "Id": "mp.com.4.aws.vpc.2", + "Description": "Separación de flujos de información en la red", + "Attributes": [ + { + "IdGrupoControl": "mp.com.4", + "Marco": "medidas de protección", + "Categoria": "protección de las comunicaciones", + "DescripcionControl": "Evitar el uso de subnets con la opción de asignación automática de IPs (auto-assign Public IP).", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "ec2_instance_public_ip" + ] + }, + { + "Id": "mp.si.2.aws.kms.1", + "Description": "Criptografía", + "Attributes": [ + { + "IdGrupoControl": "mp.si.2", + "Marco": "medidas de protección", + "Categoria": "protección de los soportes de información", + "DescripcionControl": "Aplicar cifrado sobre el almacenamiento de las instancias en todos sus volúmenes de datos.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad" + ] + } + ], + "Checks": [ + "ec2_ebs_volume_encryption" + ] + }, + { + "Id": "mp.si.2.aws.s3.1", + "Description": "Criptografía", + "Attributes": [ + { + "IdGrupoControl": "mp.si.2", + "Marco": "medidas de protección", + "Categoria": "protección de los soportes de información", + "DescripcionControl": "Aplicar cifrado sobre los distintos buckets de S3, de los cuales se debe asegurar que tengan activado el cifrado en reposo.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad" + ] + } + ], + "Checks": [ + "s3_bucket_default_encryption" + ] + }, + { + "Id": "mp.si.2.aws.sqs.1", + "Description": "Criptografía", + "Attributes": [ + { + "IdGrupoControl": "mp.si.2", + "Marco": "medidas de protección", + "Categoria": "protección de los soportes de información", + "DescripcionControl": "Aplicar cifrado sobre las colas de mensajes de AWS (Amazon SQS).", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad" + ] + } + ], + "Checks": [ + "sqs_queues_server_side_encryption_enabled" + ] + }, + { + "Id": "mp.si.2.aws.dydb.1", + "Description": "Criptografía", + "Attributes": [ + { + "IdGrupoControl": "mp.si.2", + "Marco": "medidas de 
protección", + "Categoria": "protección de los soportes de información", + "DescripcionControl": "Aplicar cifrado sobre las bases de datos DynamoDB, que deben implementar cifrado seguro mediante el uso de claves de cliente (KMS).", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad" + ] + } + ], + "Checks": [ + "dynamodb_tables_kms_cmk_encryption_enabled" + ] + }, + { + "Id": "mp.si.2.aws.es.1", + "Description": "Criptografía", + "Attributes": [ + { + "IdGrupoControl": "mp.si.2", + "Marco": "medidas de protección", + "Categoria": "protección de los soportes de información", + "DescripcionControl": "Aplicar cifrado sobre todos los dominios del servicio Amazon Elasticsearch Service (ES). En caso de usar este servicio, deberá asegurarse la activación del cifrado en reposo.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad" + ] + } + ], + "Checks": [ + "opensearch_service_domains_encryption_at_rest_enabled" + ] + }, + { + "Id": "mp.si.2.r2.aws.ebs.1", + "Description": "Copias de seguridad", + "Attributes": [ + { + "IdGrupoControl": "mp.si.2.r2", + "Marco": "medidas de protección", + "Categoria": "protección de los soportes de información", + "DescripcionControl": "Se deberá asegurar el cifrado de las copias de seguridad (snapshots) de EBS.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "confidencialidad", + "integridad" + ] + } + ], + "Checks": [ + "ec2_ebs_snapshots_encrypted" + ] + }, + { + "Id": "mp.s.2.aws.waf.1", + "Description": "Protección de servicios y aplicaciones web", + "Attributes": [ + { + "IdGrupoControl": "mp.s.2", + "Marco": "medidas de protección", + "Categoria": "protección de los servicios", + "DescripcionControl": "Todas las aplicaciones web distribuidas por el servicio de AWS CloudFront deben estar integradas con el servicio de firewall de aplicaciones web AWS WAF.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "cloudfront_distributions_using_waf" + ] + }, + { + "Id": "mp.s.2.aws.waf.2", + "Description": "Protección de servicios y aplicaciones web", + "Attributes": [ + { + "IdGrupoControl": "mp.s.2", + "Marco": "medidas de protección", + "Categoria": "protección de los servicios", + "DescripcionControl": "Los API gateways deben tener un ACL WAF asociado.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "apigateway_waf_acl_attached" + ] + }, + { + "Id": "mp.s.2.aws.waf.3", + "Description": "Protección de servicios y aplicaciones web", + "Attributes": [ + { + "IdGrupoControl": "mp.s.2", + "Marco": "medidas de protección", + "Categoria": "protección de los servicios", + "DescripcionControl": "Todos los balanceadores de aplicación deben estar integrados con el servicio de firewall de aplicación web para quedar protegidos ante ataques de la capa de aplicación", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "elbv2_waf_acl_attached" + ] + }, + { + "Id": "mp.s.4.r1.aws.shieldadv.1", + "Description": "Detección y reacción", + "Attributes": [ + { + "IdGrupoControl": "mp.s.4.r1", + "Marco": "medidas de protección", + "Categoria": "protección de los servicios", + 
"DescripcionControl": "Activar AWS Shield Advanced con el fin de disponer de una herramienta de prevención, detección y mitigación de ataques de denegación de servicio.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "disponibilidad" + ] + } + ], + "Checks": [ + "shield_advanced_protection_in_associated_elastic_ips", + "shield_advanced_protection_in_classic_load_balancers", + "shield_advanced_protection_in_cloudfront_distributions", + "shield_advanced_protection_in_global_accelerators", + "shield_advanced_protection_in_internet_facing_load_balancers", + "shield_advanced_protection_in_route53_hosted_zones" + ] + } + ] +} diff --git a/compliance/ens_rd2022_aws.json b/compliance/ens_rd2022_aws.json deleted file mode 100644 index 21ae0165..00000000 --- a/compliance/ens_rd2022_aws.json +++ /dev/null @@ -1,1793 +0,0 @@ -{ - "Framework": "ENS", - "Version": "RD2022", - "Requirements": [ - { - "Id": "op.acc.1.aws.iam.1", - "Description": "Proveedor de identidad centralizado", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Para las entidades de los empleados, deberá manejarse un proveedor de identidades que permita administrar las identidades en un lugar centralizado.", - "Nivel": "bajo", - "Tipo": "recomendacion", - "Dimensiones": [ - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_check_saml_providers_sts" - ] - }, - { - "Id": "op.acc.2.aws.iam.3", - "Description": "Requisitos de acceso", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Se deberá delegar en cuentas administradoras la administración de la organización, dejando la cuenta maestra sin uso y con las medidas de seguridad pertinentes.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_avoid_root_usage" - ] - }, - { - "Id": "op.acc.3.aws.iam.4", - "Description": "Segregación rigurosa", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Emplear RBAC para separar las funciones de configuración y mantenimiento del sistema (como buena práctica, se puede asignar a los usuarios de mantenimiento la política AWSSupportAccess)", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_support_role_created" - ] - }, - { - "Id": "op.acc.4.aws.iam.1", - "Description": "Proceso de gestión de derechos de acceso", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Se restringirá todo acceso a las acciones especificadas para el usuario root de una cuenta.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_no_root_access_key", - "iam_avoid_root_usage" - ] - }, - { - "Id": "op.acc.4.aws.iam.2", - "Description": "Proceso de gestión de derechos de acceso", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Configurar las políticas IAM de modo que sólo se permitan los privilegios necesarios para cada rol (privilegio mínimo). 
Comenzar con el mínimo nivel de permisos e ir añadiendo permisos adicionales según vaya surgiendo la necesidad en lugar de comenzar con permisos administrativos.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "sqs_queues_not_publicly_accessible", - "s3_bucket_policy_public_write_access", - "lambda_function_not_publicly_accessible", - "iam_no_custom_policy_permissive_role_assumption", - "cloudwatch_cross_account_sharing_disabled", - "lambda_function_url_public", - "lambda_function_url_cors_policy", - "iam_customer_managed_policy_least_privilege", - "iam_policy_no_administrative_privileges" - ] - }, - { - "Id": "op.acc.4.aws.iam.5", - "Description": "Proceso de gestión de derechos de acceso", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Evitar políticas con comodines (wildcards) en su definición, que puedan otorgar privilegios administrativos completos.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_policy_allows_privilege_escalation", - "iam_no_custom_policy_permissive_role_assumption", - "iam_policy_no_administrative_privileges" - ] - }, - { - "Id": "op.acc.4.aws.iam.1", - "Description": "Proceso de gestión de derechos de acceso", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Se configurarán diferentes permisos a las cuentas de usuario, limitando la utilización de la cuenta “root” para tareas específicas que necesiten un nivel de privilegios elevado, esta configuración debe entenderse como un mecanismo para impedir que el trabajo directo con usuarios con privilegios de administrador repercuta negativamente en la seguridad, a acometer todas las acciones con el máximo privilegio cuando este no es siempre requerido.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_policy_no_administrative_privileges" - ] - }, - { - "Id": "op.acc.4.aws.sys.1", - "Description": "Proceso de gestión de derechos de acceso", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Habilitar systems manager automation para evitar acceso remoto humano a tareas automatizables.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "ec2_instance_managed_by_ssm" - ] - }, - { - "Id": "op.acc.6.aws.iam.1", - "Description": "Mecanismo de autenticación (usuarios de la organización)", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Evitar el uso permanente de múltiples claves de acceso para un mismo usuario IAM.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_user_two_active_access_key" - ] - }, - { - "Id": "op.acc.6.aws.iam.2", - "Description": "Mecanismo de autenticación (usuarios de la organización)", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Las claves de acceso deberán rotarse cada 90 días o menos.", - "Nivel": "alto", - "Tipo": 
"requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_disable_90_days_credentials", - "iam_rotate_access_key_90_days" - ] - }, - { - "Id": "op.acc.6.aws.iam.3", - "Description": "Mecanismo de autenticación (usuarios de la organización)", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Se deberá evitar la asignación por defecto de claves de acceso para todos los usuarios que tengan acceso a la consola. ", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_user_no_setup_initial_access_key" - ] - }, - { - "Id": "op.acc.6.aws.iam.4", - "Description": "Mecanismo de autenticación (usuarios de la organización)", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Deberá habilitarse el vencimiento de la contraseña.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_password_policy_expires_90_days", - "iam_disable_90_days_credentials", - "iam_rotate_access_key_90_days" - ] - }, - { - "Id": "op.acc.6.aws.iam.6", - "Description": "Contraseñas", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Las contraseñas de los usuarios deberán tener normas de complejidad mínima y robustez.", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_password_policy_lowercase", - "iam_password_policy_minimum_length_14", - "iam_password_policy_number", - "iam_password_policy_reuse_24", - "iam_password_policy_symbol", - "iam_password_policy_uppercase" - ] - }, - { - "Id": "op.acc.6.aws.iam.7", - "Description": "Contraseña + otro factor de autenticación", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "MFA deberá estar habilitado para todas las cuentas que tengan contraseña para acceder a la consola (usuarios IAM).", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_user_mfa_enabled_console_access" - ] - }, - { - "Id": "op.acc.6.aws.iam.9", - "Description": "Certificados en dispositvo físico", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Se deberá usar certificados cualificados en soporte físico como mecanismo de autenticación que usen algoritmos, parámetros y dispositivos autorizados por el Centro Criptológico Nacional.", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_user_mfa_enabled", - "iam_root_hardware_mfa_enabled" - ] - }, - { - "Id": "", - "Description": "Certificados en dispositvo físico", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Se deberá usar certificados cualificados en soporte físico como mecanismo de autenticación que usen algoritmos, parámetros y dispositivos autorizados por el Centro Criptológico Nacional.", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - 
"confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_administrator_access_with_mfa", - "iam_root_mfa_enabled", - "iam_user_mfa_enabled_console_access" - ] - }, - { - "Id": "op.acc.6.aws.ct.1", - "Description": "Registro", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Habilitar CloudTrail en todas las regiones para el registro de accesos de usuarios.", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "cloudtrail_multi_region_enabled" - ] - }, - { - "Id": "op.acc.6.aws.iam.10", - "Description": "Suspensión por no utilización", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Las claves de acceso de los usuarios IAM que no hayan sido empleadas durante un periodo de 90 días o más deberán ser deshabilitadas.", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_disable_30_days_credentials", - "iam_disable_90_days_credentials" - ] - }, - { - "Id": "op.acc.6.aws.iam.11", - "Description": "Doble factor para acceso desde o a través de zonas no controladas", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "control de acceso", - "DescripcionControl": "Se deberá emplear como mecanismo de autenticación o bien una contraseña más otro factor de autenticación, o bien un certificado cualificado (con o sin soporte físico) protegido por un doble factor de autenticación.", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad" - ] - } - ], - "Checks": [ - "iam_user_mfa_enabled_console_access" - ] - }, - { - "Id": "", - "Description": "Inventario de activos", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Asegurar que AWS Config está habilitado en todas las regiones", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "config_recorder_all_regions_enabled" - ] - }, - { - "Id": "", - "Description": "Inventario de activos", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Asociar etiquetas para todos los activos", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "tags_exist_in_required_resources" - ] - }, - { - "Id": "", - "Description": "Inventario de activos", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Habilitar Inventory para todo el entorno de EC2 en caso de no utilizar herramientas de terceros", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_instance_managed_by_ssm", - "ec2_ssm_managed_compliant_patching" - ] - }, - { - "Id": "", - "Description": "Inventario de activos", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Asignar metadatos personalizados a cada nodo administrado con información sobre el 
responsable del activo.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "tags_exist_in_required_resources" - ] - }, - { - "Id": "", - "Description": "Gestión de la configuración de seguridad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Utilizar la herramienta AWS Config, AWS Config Rules o Conformance packs para evaluar los componentes del sistema, asegurando:", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "config_recorder_all_regions_enabled" - ] - }, - { - "Id": "", - "Description": "Gestión de la configuración de seguridad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Reacción a vulnerabilidades notificadas", - "Nivel": "bajo", - "Tipo": "recomendacion", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "account_maintain_current_contact_details", - "account_security_contact_information_is_registered", - "account_security_questions_are_registered_in_the_aws_account" - ] - }, - { - "Id": "", - "Description": "Gestión de la configuración de seguridad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Reacción a incidentes", - "Nivel": "bajo", - "Tipo": "recomendacion", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "account_maintain_current_contact_details", - "account_security_contact_information_is_registered", - "account_security_questions_are_registered_in_the_aws_account" - ] - }, - { - "Id": "op.exp.3.aws.cfg.4", - "Description": "Copias de seguridad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Hacer uso de AWS Config para consultar el histórico de configuraciones de recursos.", - "Nivel": "bajo", - "Tipo": "recomendacion", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "config_recorder_all_regions_enabled" - ] - }, - { - "Id": "op.exp.4.aws.sys.2", - "Description": "Mantenimiento y actualizaciones de seguridad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Utilizar AWS Systems Manager Patch Manager para planificar y gestionar la aplicación de parches minimizando los riesgos asociados a tener instancias con software desactualizado y expuesto a vulnerabilidades conocidas.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_ssm_managed_compliant_patching" - ] - }, - { - "Id": "op.exp.4.aws.sma.1", - "Description": "Prevención de fallos", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Utilizar la solución AWS Systems Manager Automation para automatizar las tareas de corrección en servicios de AWS como EC2 y RDS.", - "Nivel": "bajo", - "Tipo": "recomendacion", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - 
] - } - ], - "Checks": [ - "ec2_instance_managed_by_ssm", - "ec2_ssm_managed_compliant_patching" - ] - }, - { - "Id": "op.exp.5.aws.ct.1", - "Description": "Gestión de cambios", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Asegurar que CloudTrail esté activo para todas las regiones.", - "Nivel": "bajo", - "Tipo": "recomendacion", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "cloudtrail_multi_region_enabled", - "cloudtrail_security_trail_enabled?" - ] - }, - { - "Id": "op.exp.6.aws.gd.1", - "Description": "Protección frente a código dañino", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Activar la protección contra software malintencionado de GuardDuty en todas las regiones para monitorizar instancias y cargas de trabajo.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "guardduty_is_enabled" - ] - }, - { - "Id": "", - "Description": "Gestión de incidentes", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "La referencia a estas medidas de seguridad puede encontrarse en la guía CCN-STIC 887F Guía de Respuesta a incidentes de seguridad en AWS", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "guardduty_is_enabled", - "guardduty_no_high_severity_findings", - "securityhub_is_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.1", - "Description": "Registro de actividad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Asegurar que AWS CloudTrail esté activo para todas las regiones", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_multi_region_enabled" - ] - }, - { - "Id": "op.exp.8.aws.cw.1", - "Description": "Registro de actividad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Establecer un filtro de métricas desde AWS CloudWatch para detectar cambios en las configuraciones de CloudTrail", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.2", - "Description": "Registro de actividad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Habilitar la validación de archivos en todos los trails, evitando así que estos se vean modificados o eliminados.", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_log_file_validation_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.3", - "Description": "Registro de actividad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Habilitar la entrega continua de eventos de CloudTrail a un bucket S3 dedicado.", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_dataevents_enabled", - "cloudtrail_management_events_logging_enabled", - 
"cloudtrail_logs_s3_bucket_is_not_publicly_accessible", - "cloudtrail_s3_dataevents_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.4", - "Description": "Registro de actividad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Se deberán habilitar alertas para las llamadas no permitidas a la API", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudwatch_log_metric_filter_unauthorized_api_calls" - ] - }, - { - "Id": "op.exp.8.aws.ct.5", - "Description": "Registro de actividad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Se deberán habilitar alertas para los accesos no permitidos a la consola", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudwatch_log_metric_filter_authentication_failures" - ] - }, - { - "Id": "op.exp.8.aws.ct.6", - "Description": "Registro de actividad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Se deberán habilitar alertas para los accesos a la consola sin el correcto uso de MFA", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudwatch_log_metric_filter_sign_in_without_mfa" - ] - }, - { - "Id": "op.exp.8.aws.ct.7", - "Description": "Registro de actividad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Se deberán habilitar alertas para toda la actividad realizada sobre y por la cuenta root", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudwatch_log_metric_filter_root_usage" - ] - }, - { - "Id": "op.exp.8.aws.ct.8", - "Description": "Registro de actividad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Se deberán habilitar alertas para cualquier cambio sobre las políticas IAM", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudwatch_log_metric_filter_policy_changes" - ] - }, - { - "Id": "op.exp.8.aws.ct.19", - "Description": "Registro de actividad", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Asegurar que AWS CloudTrail esté activo para todas las regiones", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_multi_region_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.10", - "Description": "Revisión de los registros", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Configurar la herramienta CloudTrail de manera que realice el registro de eventos de administración.", - "Nivel": "pytec", - "Tipo": "refuerzo", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.11", - "Description": "Revisión de los registros", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Configurar la herramienta CloudTrail de manera que realice el registro de eventos de datos.", - "Nivel": "pytec", - "Tipo": "refuerzo", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_dataevents_enabled", - "cloudtrail_partial_dataevents_enabled" - ] 
- }, - { - "Id": "op.exp.8.aws.ct.13", - "Description": "Revisión de los registros", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Registrar los eventos de lectura y escritura de datos.", - "Nivel": "pytec", - "Tipo": "refuerzo", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_s3_dataevents_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.18", - "Description": "Revisión de los registros", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Habilitar la entrega continua de eventos de CloudTrail a un bucket de Amazon S3", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_logs_s3_bucket_access_logging_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.14", - "Description": "Revisión de los registros", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Integrar CloudTrail con el servicio CloudWatch Logs", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_cloudwatch_logging_enabled" - ] - }, - { - "Id": "op.exp.8.aws.cw.4", - "Description": "Retención de registros", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Ejecutar la acción PutRetentionPolicy de Amazon CloudWatch.", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudwatch_log_group_retention_policy_specific_days_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.15", - "Description": "Control de acceso", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Configurar los archivos de logs de AWS CloudTrail para aprovechar el cifrado SSE y las CMK de KMS.", - "Nivel": "pytec", - "Tipo": "refuerzo", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_kms_encryption_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.16", - "Description": "Control de acceso", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "El almacén de logs de CloudTrail no debería ser accesible de forma pública", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_logs_s3_bucket_access_logging_enabled" - ] - }, - { - "Id": "op.exp.8.aws.ct.17", - "Description": "Control de acceso", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "El almacén de logs de CloudTrail no debería ser accesible de forma pública (ACLs)", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_logs_s3_bucket_is_not_publicly_accessible" - ] - }, - { - "Id": "op.exp.8.aws.kms.1", - "Description": "Control de acceso", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Cifrado de los trails con KMS", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "trazabilidad" - ] - } - ], - "Checks": [ - "cloudtrail_kms_encryption_enabled" - ] - }, - { - "Id": "op.exp.10.aws.kms.3", - "Description": "Protección de claves criptográficas", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Activar la rotación de las claves CMK.", - "Nivel": "pytec", - "Tipo": "medida", - 
"Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "kms_cmk_rotation_enabled" - ] - }, - { - "Id": "op.exp.10.aws.kms.4", - "Description": "Protección de claves criptográficas", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Deshabilitar todas las claves CMK que no estén en uso.", - "Nivel": "pytec", - "Tipo": "medida", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "kms_disable_or_scheduled_deletion_of_kms_cmks_alarms_enabled" - ] - }, - { - "Id": "op.exp.10.aws.kms.5", - "Description": "Protección de claves criptográficas", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "explotación", - "DescripcionControl": "Eliminar todas las claves CMK deshabilitadas que no mantengan ningún objeto o recurso cifrado.", - "Nivel": "pytec", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "kms_disable_or_scheduled_deletion_of_kms_cmks_alarms_enabled" - ] - }, - { - "Id": "op.mon.1.aws.gd.1", - "Description": "Detección de intrusión", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "monitorización del sistema", - "DescripcionControl": "Habilitar Amazon GuarDuty para la detección de amenazas e intrusiones en ausencia de herramientas de terceros.", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "guardduty_is_enabled" - ] - }, - { - "Id": "op.mon.1.aws.ct.1", - "Description": "Detección de intrusión", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "monitorización del sistema", - "DescripcionControl": "Activar el servicio de eventos AWS CloudTrail para todas las regiones.", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "cloudtrail_multi_region_enabled" - ] - }, - { - "Id": "op.mon.1.aws.vpc.1", - "Description": "Detección de intrusión", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "monitorización del sistema", - "DescripcionControl": "Activar el servicio VPC FlowLogs.", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "vpc_flow_logs_enabled" - ] - }, - { - "Id": "op.mon.1.aws.gd.4", - "Description": "Detección de intrusión", - "Attributes": [ - { - "Marco": "operacional", - "Categoria": "monitorización del sistema", - "DescripcionControl": "Deberá habilitarse Amazon GuardDuty para todas las regiones tanto en la cuenta raíz como en las cuentas miembro de un entorno multi-cuenta.", - "Nivel": "alto", - "Tipo": "medida", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "guardduty_is_enabled" - ] - }, - { - "Id": "mp.com.1.aws.sg.1", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Asegurar que el Security Group por defecto restrinja todo el tráfico.", - "Nivel": "alto", - "Tipo": "requisito", - 
"Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_default_restrict_traffic" - ] - }, - { - "Id": "mp.com.1.aws.sg.2", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Agregar las reglas del Security Group que se aplica por defecto cuando se crea una VPC.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "extra7173", - "vpc_default_network_acls_allow_all", - "vpc_subnets_with_default_network_acls", - "vpc_unused_default_vpc_removal" - ] - }, - { - "Id": "mp.com.1.aws.sg.3", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Evitar la existencia de Security Groups que dejen abierto todo el tráfico entrante.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_in_use_without_ingress_filtering" - ] - }, - { - "Id": "mp.com.1.aws.sg.4", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Evitar tener (un repositorio de) Security Groups que no estén siendo usados.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_not_used" - ] - }, - { - "Id": "mp.com.1.aws.elb.1", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Habilitar TLS en los balanceadores de carga ELB ", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "elb_ssl_listeners" - ] - }, - { - "Id": "mp.com.1.aws.elb.2", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Evitar el uso de protocolos de cifrado inseguros para las políticas de seguridad de ELB. Esto podría dejar la conexión SSL entre balanceadores y clientes vulnerables a ser explotados. En particular deberá evitarse el uso de TLS 1.0. 
", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "elb_insecure_ssl_ciphers" - ] - }, - { - "Id": "mp.com.1.aws.s3.1", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Asegurar que los Buckets S3 de almacenamiento apliquen cifrado para la transferencia de datos empleando Secure Sockets Layer (SSL)", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "s3_bucket_secure_transport_policy" - ] - }, - { - "Id": "mp.com.1.aws.cf.1", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Asegurar que la distribución entre frontales CloudFront y sus orígenes únicamente emplee tráfico HTTPs ", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "cloudfront_distributions_https_enabled" - ] - }, - { - "Id": "mp.com.1.aws.sg.5", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "SSH no deberá ser accesible desde internet.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22" - ] - }, - { - "Id": "mp.com.1.aws.sg.6", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "RDP no deberá ser accesible desde internet.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_allow_ingress_tcp_port_3389", - "vpc_network_acls_no_unrestricted_ssh_rdp" - ] - }, - { - "Id": "mp.com.1.aws.sg.7", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Oracle (TCP/1521 y TCP/2483) no deberá ser accesible desde internet.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483" - ] - }, - { - "Id": "mp.com.1.aws.sg.8", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "MySQL (TCP/3306) no deberá ser accesible desde internet.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306" - ] - }, - { - "Id": "mp.com.1.aws.sg.9", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas 
de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Postgres (TCP 5432) no deberá ser accesible desde internet.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432" - ] - }, - { - "Id": "mp.com.1.aws.sg.10", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Redis (TCP/6397) no deberá ser accesible desde internet.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379" - ] - }, - { - "Id": "mp.com.1.aws.sg.11", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "MongoDB (TCP/7199 - TCP/9160 y TCP/888) no deberá ser accesible desde internet.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018" - ] - }, - { - "Id": "mp.com.1.aws.sg.12", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Memcached (TCP/11211) no deberá ser accesible desde ineternet.", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211" - ] - }, - { - "Id": "mp.com.1.aws.sg.13", - "Description": "Perímetro seguro", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Cassandra (TCP/7199- TCP/9160 y TCP/8888) no deberá ser accesible desde internet", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "confidencialidad", - "integridad", - "trazabilidad", - "autenticidad", - "disponibilidad" - ] - } - ], - "Checks": [ - "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888" - ] - }, - { - "Id": "mp.com.3.aws.elb.1", - "Description": "Protección de la integridad y de la autenticidad", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Habilitar TLS en los balanceadores de carga ELB", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "integridad", - "autenticidad" - ] - } - ], - "Checks": [ - "ec2_elbv2_insecure_ssl_ciphers" - ] - }, - { - "Id": "mp.com.3.aws.elb.2", - "Description": "Protección de la integridad y de la autenticidad", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Evitar el uso de protocolos de cifrado inseguros en la conexión TLS entre clientes y balanceadores de carga", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "integridad", - "autenticidad" - ] - } - ], - "Checks": [ - 
"ec2_elbv2_insecure_ssl_ciphers" - ] - }, - { - "Id": "mp.com.3.aws.s3.1", - "Description": "Protección de la integridad y de la autenticidad", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Asegurar que los Buckets de almacenamiento S3 apliquen cifrado para la transferencia de datos empleando TLS", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "integridad", - "autenticidad" - ] - } - ], - "Checks": [ - "s3_bucket_secure_transport_policy" - ] - }, - { - "Id": "mp.com.3.aws.cf.1", - "Description": "Protección de la integridad y de la autenticidad", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de las comunicaciones", - "DescripcionControl": "Asegurar que la distribución entre frontales CloudFront y sus orígenes únicamente emplee tráfico HTTPS", - "Nivel": "alto", - "Tipo": "requisito", - "Dimensiones": [ - "integridad", - "autenticidad" - ] - } - ], - "Checks": [ - "cloudfront_distributions_https_enabled" - ] - }, - { - "Id": "mp.si.2.aws.ebs.2", - "Description": "Copias de seguridad", - "Attributes": [ - { - "Marco": "medidas de protección", - "Categoria": "protección de los soportes de información", - "DescripcionControl": "Se deberá asegurar el cifrado de las copias de seguridad de EBS.", - "Nivel": "alto", - "Tipo": "refuerzo", - "Dimensiones": [ - "confidencialidad", - "integridad" - ] - } - ], - "Checks": [ - "ec2_ebs_snapshot_encryption" - ] - } - ] -} \ No newline at end of file diff --git a/config/config.py b/config/config.py index ba8abda4..d04c79cd 100644 --- a/config/config.py +++ b/config/config.py @@ -15,6 +15,9 @@ banner_color = "\033[1;92m" # Groups groups_file = "groups.json" +# Compliance +compliance_specification_dir = "./compliance" + # AWS services-regions matrix json aws_services_json_file = "providers/aws/aws_regions_by_service.json" diff --git a/lib/check/check.py b/lib/check/check.py index ab52d462..2a3308a3 100644 --- a/lib/check/check.py +++ b/lib/check/check.py @@ -1,4 +1,7 @@ +import functools import importlib +import os +import sys from pkgutil import walk_packages from types import ModuleType from typing import Any @@ -6,7 +9,8 @@ from typing import Any from alive_progress import alive_bar from colorama import Fore, Style -from config.config import groups_file, orange_color +from config.config import compliance_specification_dir, groups_file, orange_color +from lib.check.compliance_models import load_compliance_framework from lib.check.models import Check, Output_From_Options, load_check_metadata from lib.logger import logger from lib.outputs.outputs import report @@ -31,6 +35,29 @@ def bulk_load_checks_metadata(provider: str) -> dict: return bulk_check_metadata +# Bulk load all compliance frameworks specification +def bulk_load_compliance_frameworks(provider: str) -> dict: + """Bulk load all compliance frameworks specification into a dict""" + bulk_compliance_frameworks = {} + compliance_specification_dir_path = f"{compliance_specification_dir}/{provider}" + try: + for filename in os.listdir(compliance_specification_dir_path): + file_path = os.path.join(compliance_specification_dir_path, filename) + # Check if it is a file + if os.path.isfile(file_path): + # Open Compliance file in JSON + # cis_v1.4_aws.json --> cis_v1.4_aws + compliance_framework_name = filename.split(".json")[0] + # Store the compliance info + bulk_compliance_frameworks[ + compliance_framework_name + ] = load_compliance_framework(file_path) + except 
Exception as e: + logger.error(f"{e.__class__.__name__} -- {e}") + + return bulk_compliance_frameworks + + # Exclude checks to run def exclude_checks_to_run(checks_to_execute: set, excluded_checks: list) -> set: for check in excluded_checks: @@ -101,16 +128,43 @@ def print_services(service_list: set): print(f"- {service}") -def print_checks(provider: str, check_list: set, bulk_checks_metadata: dict): +def print_compliance_frameworks(bulk_compliance_frameworks: dict): + print( + f"There are {Fore.YELLOW}{len(bulk_compliance_frameworks.keys())}{Style.RESET_ALL} available Compliance Frameworks: \n" + ) + for framework in bulk_compliance_frameworks.keys(): + print(f"\t- {Fore.YELLOW}{framework}{Style.RESET_ALL}") + + +def print_compliance_requirements(bulk_compliance_frameworks: dict): + if bulk_compliance_frameworks and "ens_rd2022_aws" in bulk_compliance_frameworks: + print("Listing ENS RD2022 AWS Compliance Requirements:\n") + for compliance in bulk_compliance_frameworks.values(): + for requirement in compliance.Requirements: + checks = "" + for check in requirement.Checks: + checks += f" {Fore.YELLOW}\t\t{check}\n{Style.RESET_ALL}" + print( + f"Requirement Id: {Fore.MAGENTA}{requirement.Id}{Style.RESET_ALL}\n\t- Description: {requirement.Description}\n\t- Checks:\n{checks}" + ) + + +def print_checks( + provider: str, + check_list: set, + bulk_checks_metadata: dict, +): for check in check_list: try: print( f"[{bulk_checks_metadata[check].CheckID}] {bulk_checks_metadata[check].CheckTitle} - {Fore.MAGENTA}{bulk_checks_metadata[check].ServiceName} {Fore.YELLOW}[{bulk_checks_metadata[check].Severity}]{Style.RESET_ALL}" ) except KeyError as error: - logger.error( + logger.critical( f"Check {error} was not found for the {provider.upper()} provider" ) + sys.exit() + print( f"\nThere are {Fore.YELLOW}{len(check_list)}{Style.RESET_ALL} available checks.\n" ) @@ -150,21 +204,51 @@ def load_checks_to_execute_from_groups( return checks_to_execute +# Parse checks from compliance frameworks specification +def parse_checks_from_compliance_framework( + compliance_frameworks: list, bulk_compliance_frameworks: dict +) -> list: + """Parse checks from compliance frameworks specification""" + checks_to_execute = set() + try: + for framework in compliance_frameworks: + # compliance_framework_json["Requirements"][*]["Checks"] + compliance_framework_checks_list = [ + requirement.Checks + for requirement in bulk_compliance_frameworks[framework].Requirements + ] + # Reduce nested list into a list + # Pythonic functional magic + compliance_framework_checks = functools.reduce( + lambda x, y: x + y, compliance_framework_checks_list + ) + # Then union this list of checks with the initial one + checks_to_execute = checks_to_execute.union(compliance_framework_checks) + except Exception as e: + logger.error(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}") + + return checks_to_execute + + # Recover all checks from the selected provider and service def recover_checks_from_provider(provider: str, service: str = None) -> list: - checks = [] - modules = list_modules(provider, service) - for module_name in modules: - # Format: "providers.{provider}.services.{service}.{check_name}.{check_name}" - check_name = module_name.name - # We need to exclude common shared libraries in services - if ( - check_name.count(".") == 5 - and "lib" not in check_name - and "test" not in check_name - ): - checks.append(check_name) - return checks + try: + checks = [] + modules = list_modules(provider, service) + for module_name in 
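A minimal usage sketch of the compliance plumbing added above, assuming the ./compliance/aws directory introduced by this patch; "cis_1.4_aws" is only an example framework name (the specification file name without its .json suffix):

from lib.check.check import (
    bulk_load_compliance_frameworks,
    parse_checks_from_compliance_framework,
)

# Load every ./compliance/aws/*.json specification, keyed by file name
# without the extension (e.g. "cis_1.4_aws").
bulk_compliance_frameworks = bulk_load_compliance_frameworks("aws")

# Resolve the union of all checks referenced by the requested frameworks.
checks_to_execute = parse_checks_from_compliance_framework(
    ["cis_1.4_aws"], bulk_compliance_frameworks
)
print(sorted(checks_to_execute))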
modules: + # Format: "providers.{provider}.services.{service}.{check_name}.{check_name}" + check_name = module_name.name + # We need to exclude common shared libraries in services + if ( + check_name.count(".") == 5 + and "lib" not in check_name + and "test" not in check_name + ): + checks.append(check_name) + return checks + except Exception as e: + logger.critical(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}]: {e}") + sys.exit() # List all available modules in the selected provider and service @@ -184,6 +268,7 @@ def import_check(check_path: str) -> ModuleType: return lib +# Sets the Output_From_Options to be used in the output modes def set_output_options( quiet: bool, output_modes: list, @@ -191,8 +276,10 @@ def set_output_options( security_hub_enabled: bool, output_filename: str, allowlist_file: str, + bulk_checks_metadata: dict, verbose: bool, ): + """Sets the Output_From_Options to be used in the output modes""" global output_options output_options = Output_From_Options( is_quiet=quiet, @@ -201,6 +288,7 @@ def set_output_options( security_hub_enabled=security_hub_enabled, output_filename=output_filename, allowlist_file=allowlist_file, + bulk_checks_metadata=bulk_checks_metadata, verbose=verbose, # set input options here ) @@ -211,15 +299,15 @@ def run_check(check: Check, output_options: Output_From_Options) -> list: findings = [] if output_options.verbose or output_options.is_quiet: print( - f"\nCheck ID: {check.checkID} - {Fore.MAGENTA}{check.serviceName}{Fore.YELLOW} [{check.severity}]{Style.RESET_ALL}" + f"\nCheck ID: {check.CheckID} - {Fore.MAGENTA}{check.ServiceName}{Fore.YELLOW} [{check.Severity}]{Style.RESET_ALL}" ) - logger.debug(f"Executing check: {check.checkID}") + logger.debug(f"Executing check: {check.CheckID}") try: findings = check.execute() except Exception as error: - print(f"Something went wrong in {check.checkID}, please use --log-level ERROR") + print(f"Something went wrong in {check.CheckID}, please use --log-level ERROR") logger.error( - f"{check.checkID} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + f"{check.CheckID} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) finally: return findings @@ -264,13 +352,14 @@ def execute_checks( # If check does not exists in the provider or is from another provider except ModuleNotFoundError: - logger.error( + logger.critical( f"Check '{check_name}' was not found for the {provider.upper()} provider" ) + bar.title = f"-> {Fore.RED}Scan was aborted!{Style.RESET_ALL}" + sys.exit() except Exception as error: logger.error( f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) - bar.title = f"-> {Fore.GREEN}Scan is completed!" 
- print(Style.RESET_ALL) + bar.title = f"-> {Fore.GREEN}Scan is completed!{Style.RESET_ALL}" return all_findings diff --git a/lib/check/check_test.py b/lib/check/check_test.py index d152931f..97b26dbf 100644 --- a/lib/check/check_test.py +++ b/lib/check/check_test.py @@ -1,10 +1,13 @@ import os +from unittest import mock from lib.check.check import ( + bulk_load_compliance_frameworks, exclude_checks_to_run, exclude_groups_to_run, exclude_services_to_run, load_checks_to_execute_from_groups, + parse_checks_from_compliance_framework, parse_checks_from_file, parse_groups_from_file, ) @@ -12,17 +15,6 @@ from lib.check.models import load_check_metadata class Test_Check: - # def test_import_check(self): - # test_cases = [ - # { - # "name": "Test valid check path", - # "input": "providers.aws.services.iam.iam_disable_30_days_credentials.iam_disable_30_days_credentials", - # "expected": "providers.aws.services.iam.iam_disable_30_days_credentials.iam_disable_30_days_credentials", - # } - # ] - # for test in test_cases: - # assert importlib.import_module(test["input"]).__name__ == test["expected" - def test_parse_groups_from_file(self): test_cases = [ { @@ -222,3 +214,75 @@ class Test_Check: exclude_services_to_run(checks_to_run, excluded_services, provider) == test["expected"] ) + + def test_parse_checks_from_compliance_framework_two(self): + test_case = { + "input": {"compliance_frameworks": ["cis_v1.4_aws", "ens_v3_aws"]}, + "expected": { + "vpc_flow_logs_enabled", + "ec2_ebs_snapshot_encryption", + "iam_user_mfa_enabled_console_access", + "cloudtrail_multi_region_enabled", + "ec2_elbv2_insecure_ssl_ciphers", + "guardduty_is_enabled", + "s3_bucket_default_encryption", + "cloudfront_distributions_https_enabled", + "iam_avoid_root_usage", + "s3_bucket_secure_transport_policy", + }, + } + with mock.patch( + "lib.check.check.compliance_specification_dir", + new=f"{os.path.dirname(os.path.realpath(__file__))}/fixtures", + ): + provider = "aws" + bulk_compliance_frameworks = bulk_load_compliance_frameworks(provider) + compliance_frameworks = test_case["input"]["compliance_frameworks"] + assert ( + parse_checks_from_compliance_framework( + compliance_frameworks, bulk_compliance_frameworks + ) + == test_case["expected"] + ) + + def test_parse_checks_from_compliance_framework_one(self): + test_case = { + "input": {"compliance_frameworks": ["cis_v1.4_aws"]}, + "expected": { + "iam_user_mfa_enabled_console_access", + "s3_bucket_default_encryption", + "iam_avoid_root_usage", + }, + } + with mock.patch( + "lib.check.check.compliance_specification_dir", + new=f"{os.path.dirname(os.path.realpath(__file__))}/fixtures", + ): + provider = "aws" + bulk_compliance_frameworks = bulk_load_compliance_frameworks(provider) + compliance_frameworks = test_case["input"]["compliance_frameworks"] + assert ( + parse_checks_from_compliance_framework( + compliance_frameworks, bulk_compliance_frameworks + ) + == test_case["expected"] + ) + + def test_parse_checks_from_compliance_framework_no_compliance(self): + test_case = { + "input": {"compliance_frameworks": []}, + "expected": set(), + } + with mock.patch( + "lib.check.check.compliance_specification_dir", + new=f"{os.path.dirname(os.path.realpath(__file__))}/fixtures", + ): + provider = "aws" + bulk_compliance_frameworks = bulk_load_compliance_frameworks(provider) + compliance_frameworks = test_case["input"]["compliance_frameworks"] + assert ( + parse_checks_from_compliance_framework( + compliance_frameworks, bulk_compliance_frameworks + ) + == test_case["expected"] + ) diff 
--git a/lib/check/checks_loader.py b/lib/check/checks_loader.py index 3638a622..b3a5437b 100644 --- a/lib/check/checks_loader.py +++ b/lib/check/checks_loader.py @@ -1,6 +1,7 @@ from config.config import groups_file -from lib.check.check import ( +from lib.check.check import ( # load_checks_to_execute_from_compliance_framework, load_checks_to_execute_from_groups, + parse_checks_from_compliance_framework, parse_checks_from_file, parse_groups_from_file, recover_checks_from_provider, @@ -8,18 +9,20 @@ from lib.check.check import ( from lib.logger import logger -# Generate the list of checks to execute -# test this function +# Generate the list of checks to execute +# PENDING Test for this function def load_checks_to_execute( bulk_checks_metadata: dict, + bulk_compliance_frameworks: dict, checks_file: str, check_list: list, service_list: list, group_list: list, severities: list, + compliance_frameworks: list, provider: str, ) -> set: - + """Generate the list of checks to execute based on the cloud provider and input arguments specified""" checks_to_execute = set() # Handle if there are checks passed using -c/--checks @@ -39,7 +42,7 @@ def load_checks_to_execute( try: checks_to_execute = parse_checks_from_file(checks_file, provider) except Exception as e: - logger.error(f"{e.__class__.__name__} -- {e}") + logger.error(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}") # Handle if there are services passed using -s/--services elif service_list: @@ -65,7 +68,16 @@ def load_checks_to_execute( available_groups, group_list, provider ) except Exception as e: - logger.error(f"{e.__class__.__name__} -- {e}") + logger.error(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}") + + # Handle if there are compliance frameworks passed using --compliance + elif compliance_frameworks: + try: + checks_to_execute = parse_checks_from_compliance_framework( + compliance_frameworks, bulk_compliance_frameworks + ) + except Exception as e: + logger.error(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}") # If there are no checks passed as argument else: @@ -73,7 +85,7 @@ def load_checks_to_execute( # Get all check modules to run with the specific provider checks = recover_checks_from_provider(provider) except Exception as e: - logger.error(f"{e.__class__.__name__} -- {e}") + logger.error(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}") else: for check_name in checks: # Recover check name from import path (last part) diff --git a/lib/check/compliance.py b/lib/check/compliance.py new file mode 100644 index 00000000..1ac54611 --- /dev/null +++ b/lib/check/compliance.py @@ -0,0 +1,43 @@ +import sys + +from lib.check.compliance_models import Compliance_Base_Model, Compliance_Requirement +from lib.logger import logger + + +def update_checks_metadata_with_compliance( + bulk_compliance_frameworks: dict, bulk_checks_metadata: dict +): + """Update the check metadata model with the compliance framework""" + try: + for check in bulk_checks_metadata: + check_compliance = [] + for framework in bulk_compliance_frameworks.values(): + for requirement in framework.Requirements: + compliance_requirements = [] + if check in requirement.Checks: + # Create the Compliance_Requirement + requirement = Compliance_Requirement( + Id=requirement.Id, + Description=requirement.Description, + Attributes=requirement.Attributes, + Checks=requirement.Checks, + ) + # For the check metadata we don't need the "Checks" key + delattr(requirement, "Checks") + # Include the requirment into the check's framework 
requirements + compliance_requirements.append(requirement) + # Create the Compliance_Model + compliance = Compliance_Base_Model( + Framework=framework.Framework, + Provider=framework.Provider, + Version=framework.Version, + Requirements=compliance_requirements, + ) + # Include the compliance framework for the check + check_compliance.append(compliance) + # Save it into the check's metadata + bulk_checks_metadata[check].Compliance = check_compliance + return bulk_checks_metadata + except Exception as e: + logger.critical(f"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}") + sys.exit() diff --git a/lib/check/compliance_models.py b/lib/check/compliance_models.py new file mode 100644 index 00000000..f019cb15 --- /dev/null +++ b/lib/check/compliance_models.py @@ -0,0 +1,75 @@ +import sys +from enum import Enum +from typing import Any, List, Optional, Union + +from pydantic import BaseModel, ValidationError + +from lib.logger import logger + + +# ENS - Esquema Nacional de Seguridad - España +class ENS_Requirements_Nivel(str, Enum): + """ENS V3 Requirements Level""" + + bajo = "bajo" + medio = "medio" + alto = "alto" + pytec = "pytec" + + +class ENS_Requirements_Dimensiones(str, Enum): + """ENS V3 Requirements Dimensions""" + + confidencialidad = "confidencialidad" + integridad = "integridad" + trazabilidad = "trazabilidad" + autenticidad = "autenticidad" + disponibilidad = "disponibilidad" + + +class ENS_Requirements(BaseModel): + """ENS V3 Framework Requirements""" + + IdGrupoControl: str + Marco: str + Categoria: str + Descripcion_Control: str + Nivel: list[ENS_Requirements_Nivel] + Dimensiones: list[ENS_Requirements_Dimensiones] + + +# Base Compliance Model +class Compliance_Requirement(BaseModel): + """Compliance_Requirement holds the base model for every requirement within a compliance framework""" + + Id: str + Description: str + Attributes: list[Union[ENS_Requirements, Any]] + Checks: List[str] + + +class Compliance_Base_Model(BaseModel): + """Compliance_Base_Model holds the base model for every compliance framework""" + + Framework: str + Provider: Optional[str] + Version: str + Requirements: list[Compliance_Requirement] + + +# Testing Pending +def load_compliance_framework( + compliance_specification_file: str, +) -> Compliance_Base_Model: + """load_compliance_framework loads and parse a Compliance Framework Specification""" + try: + compliance_framework = Compliance_Base_Model.parse_file( + compliance_specification_file + ) + except ValidationError as error: + logger.critical( + f"Compliance Framework Specification from {compliance_specification_file} is not valid: {error}" + ) + sys.exit() + else: + return compliance_framework diff --git a/lib/check/fixtures/aws/cis_v1.4_aws.json b/lib/check/fixtures/aws/cis_v1.4_aws.json new file mode 100644 index 00000000..0f124d23 --- /dev/null +++ b/lib/check/fixtures/aws/cis_v1.4_aws.json @@ -0,0 +1,82 @@ +{ + "Framework": "CIS", + "Provider": "AWS", + "Version": "1.4", + "Requirements": [ + { + "Id": "1.4", + "Description": "Ensure no 'root' user account access key exists (Automated)", + "Attributes": [ + { + "Section": "1. Identity and Access Management (IAM)", + "Level": [ + "level1" + ], + "Rationale": "Removing access keys associated with the 'root' user account limits vectors by which the account can be compromised. Additionally, removing the 'root' access keys encourages the creation and use of role based accounts that are least privileged.", + "Guidance": "The 'root' user account is the most privileged user in an AWS account. 
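A rough sketch of how the loader and the metadata updater above are meant to fit together, reusing the bulk loaders from lib/check/check.py; the exact call site is an assumption, not part of this patch:

from lib.check.check import (
    bulk_load_checks_metadata,
    bulk_load_compliance_frameworks,
)
from lib.check.compliance import update_checks_metadata_with_compliance

provider = "aws"
bulk_checks_metadata = bulk_load_checks_metadata(provider)
bulk_compliance_frameworks = bulk_load_compliance_frameworks(provider)

# Attach the matching compliance requirements to every check's metadata,
# stored in the new Check_Metadata_Model.Compliance field.
bulk_checks_metadata = update_checks_metadata_with_compliance(
    bulk_compliance_frameworks, bulk_checks_metadata
)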
AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be removed.", + "Additional information": "IAM User account \"root\" for us-gov cloud regions is not enabled by default. However, on request to AWS support enables 'root' access only through access-keys (CLI, API methods) for us-gov cloud region.", + "References": [ + "CCE-78910-7", + "https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html", + "https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html", + "https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetAccountSummary.html", + "https://aws.amazon.com/blogs/security/an-easier-way-to-determine-the-presence-of-aws-account-access-keys/" + ] + } + ], + "Checks": [ + "iam_avoid_root_usage" + ] + }, + { + "Id": "1.10", + "Description": "Ensure multi-factor authentication (MFA) is enabled for all IAM users that have a console password (Automated)", + "Attributes": [ + { + "Section": "1. Identity and Access Management (IAM)", + "Level": [ + "level1" + ], + "Guidance": "Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.", + "Rationale": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that displays a time-sensitive key and have knowledge of a credential.", + "Impact": "AWS will soon end support for SMS multi-factor authentication (MFA). New customers are not allowed to use this feature. We recommend that existing customers switch to one of the following alternative methods of MFA.", + "Additional information": "Forced IAM User Self-Service Remediation. Amazon has published a pattern that forces users to self-service setup MFA before they have access to their complete permissions set. Until they complete this step, they cannot access their full permissions. This pattern can be used on new AWS accounts. It can also be used on existing accounts - it is recommended users are given instructions and a grace period to accomplish MFA enrollment before active enforcement on existing AWS accounts.", + "References": [ + "CCE-78901-6", + "https://tools.ietf.org/html/rfc6238", + "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html", + "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#enable-mfa-for-privileged-users", + "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html", + "https://blogs.aws.amazon.com/security/post/Tx2SJJYE082KBUK/How-to-Delegate-Management-of-Multi-Factor-Authentication-to-AWS-IAM-Users" + ] + } + ], + "Checks": [ + "iam_user_mfa_enabled_console_access" + ] + }, + { + "Id": "2.1.1", + "Description": "Ensure all S3 buckets employ encryption-at-rest (Automated)", + "Attributes": [ + { + "Section": "2. 
Storage", + "Level": [ + "level2" + ], + "Guidance": "Amazon S3 provides a variety of no, or low, cost encryption options to protect data at rest.", + "Rationale": "Encrypting data at rest reduces the likelihood that it is unintentionally exposed and can nullify the impact of disclosure if the encryption remains unbroken.", + "Impact": "Amazon S3 buckets with default bucket encryption using SSE-KMS cannot be used as destination buckets for Amazon S3 server access logging. Only SSE-S3 default encryption is supported for server access log destination buckets.", + "Additional information": "S3 bucket encryption only applies to objects as they are placed in the bucket. Enabling S3 bucket encryption does not encrypt objects previously stored within the bucket", + "References": [ + "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/default-bucket-encryption.html", + "https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-related-resources" + ] + } + ], + "Checks": [ + "s3_bucket_default_encryption" + ] + } + ] +} diff --git a/lib/check/fixtures/aws/ens_v3_aws.json b/lib/check/fixtures/aws/ens_v3_aws.json new file mode 100644 index 00000000..eb65dc25 --- /dev/null +++ b/lib/check/fixtures/aws/ens_v3_aws.json @@ -0,0 +1,82 @@ +{ + "Framework": "ENS", + "Version": "3", + "Requirements": [ + { + "Id": "op.mon.1", + "Description": "Detección de intrusión", + "Attributes": [ + { + "Marco": "operacional", + "Categoria": "monitorización del sistema", + "Descripcion_Control": "- En ausencia de otras herramientas de terceros, habilitar Amazon GuarDuty para la detección de amenazas e intrusiones..- Activar el servicio de eventos AWS CloudTrail para todas las regiones..- Activar el servicio VPC FlowLogs..-Deberá habilitarse Amazon GuardDuty para todas las regiones tanto en la cuenta raíz como en las cuentas miembro de un entorno multi-cuenta..-Todas las cuentas miembro deberán estar añadidas para la supervisión bajo la cuenta raíz..-La adminsitración de Amazon GuardDuty quedará delegada exclusivamente a la cuenta de seguridad para garantizar una correcta asignación de los roles para este servicio.", + "Nivel": [ + "bajo", + "medio", + "alto" + ], + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad", + "disponibilidad" + ] + } + ], + "Checks": [ + "guardduty_is_enabled", + "cloudtrail_multi_region_enabled", + "vpc_flow_logs_enabled", + "guardduty_is_enabled" + ] + }, + { + "Id": "op.mon.3", + "Description": "Protección de la integridad y de la autenticidad", + "Attributes": [ + { + "Marco": "operacional", + "Categoria": "protección de las comunicaciones", + "Descripcion_Control": "- Habilitar TLS en los balanceadores de carga ELB.- Evitar el uso de protocolos de cifrado inseguros en la conexión TLS entre clientes y balanceadores de carga.- Asegurar que los Buckets de almacenamiento S3 apliquen cifrado para la transferencia de datos empleando TLS.- Asegurar que la distribución entre frontales CloudFront y sus orígenes únicamente emplee tráfico HTTPS.", + "Nivel": [ + "bajo", + "medio", + "alto" + ], + "Dimensiones": [ + "integridad", + "autenticidad" + ] + } + ], + "Checks": [ + "ec2_elbv2_insecure_ssl_ciphers", + "ec2_elbv2_insecure_ssl_ciphers", + "s3_bucket_secure_transport_policy", + "cloudfront_distributions_https_enabled" + ] + }, + { + "Id": "mp.si.2.r2.1", + "Description": "Copias de seguridad", + "Attributes": [ + { + "Marco": "medidas de protección", + "Categoria": "protección de los soportes de información", + 
"Descripcion_Control": "Se deberá asegurar el cifrado de las copias de seguridad de EBS.", + "Nivel": [ + "alto" + ], + "Dimensiones": [ + "confidencialidad", + "integridad" + ] + } + ], + "Checks": [ + "ec2_ebs_snapshot_encryption" + ] + } + ] +} diff --git a/lib/check/models.py b/lib/check/models.py index 03484494..10d26dc1 100644 --- a/lib/check/models.py +++ b/lib/check/models.py @@ -1,7 +1,6 @@ import sys from abc import ABC, abstractmethod from dataclasses import dataclass -from typing import List from pydantic import BaseModel, ValidationError @@ -10,34 +9,21 @@ from lib.logger import logger @dataclass class Output_From_Options: + """Class to store the Prowler output modes options""" + is_quiet: bool output_modes: list output_directory: str security_hub_enabled: bool output_filename: str allowlist_file: str + bulk_checks_metadata: dict verbose: str -# Testing Pending -def load_check_metadata(metadata_file: str) -> dict: - try: - check_metadata = Check_Metadata_Model.parse_file(metadata_file) - except ValidationError as error: - logger.critical(f"Metadata from {metadata_file} is not valid: {error}") - sys.exit() - else: - return check_metadata - - -class ComplianceItem(BaseModel): - Control: List[str] - Framework: str - Group: List[str] - Version: str - - class Code(BaseModel): + """Check's remediation information using IaC like CloudFormation, Terraform or the native CLI""" + NativeIaC: str Terraform: str CLI: str @@ -45,22 +31,26 @@ class Code(BaseModel): class Recommendation(BaseModel): + """Check's recommendation information""" + Text: str Url: str class Remediation(BaseModel): + """Check's remediation: Code and Recommendation""" + Code: Code Recommendation: Recommendation class Check_Metadata_Model(BaseModel): + """Check Metadata Model""" + Provider: str CheckID: str - # CheckName: str CheckTitle: str - # CheckAlias: str - CheckType: List[str] + CheckType: list[str] ServiceName: str SubServiceName: str ResourceIdTemplate: str @@ -70,151 +60,67 @@ class Check_Metadata_Model(BaseModel): Risk: str RelatedUrl: str Remediation: Remediation - Categories: List[str] + Categories: list[str] Tags: dict - DependsOn: List[str] - RelatedTo: List[str] + DependsOn: list[str] + RelatedTo: list[str] Notes: str - Compliance: List[ComplianceItem] + # We set the compliance to None to + # store the compliance later if supplied + Compliance: list = None -class Check(ABC): - def __init__(self): - # Load metadata from check +class Check(ABC, Check_Metadata_Model): + """Prowler Check""" + + def __init__(self, **data): + """Check's init function. 
Calls the CheckMetadataModel init.""" + # Parse the Check's metadata file check_path_name = self.__class__.__module__.replace(".", "/") metadata_file = f"{check_path_name}.metadata.json" - self.__check_metadata__ = load_check_metadata(metadata_file) - # Assign metadata values - self.__Provider__ = self.__check_metadata__.Provider - self.__CheckID__ = self.__check_metadata__.CheckID - # self.__CheckName__ = self.__check_metadata__.CheckName - self.__CheckTitle__ = self.__check_metadata__.CheckTitle - # self.__CheckAlias__ = self.__check_metadata__.CheckAlias - self.__CheckType__ = self.__check_metadata__.CheckType - self.__ServiceName__ = self.__check_metadata__.ServiceName - self.__SubServiceName__ = self.__check_metadata__.SubServiceName - self.__ResourceIdTemplate__ = self.__check_metadata__.ResourceIdTemplate - self.__Severity__ = self.__check_metadata__.Severity - self.__ResourceType__ = self.__check_metadata__.ResourceType - self.__Description__ = self.__check_metadata__.Description - self.__Risk__ = self.__check_metadata__.Risk - self.__RelatedUrl__ = self.__check_metadata__.RelatedUrl - self.__Remediation__ = self.__check_metadata__.Remediation - self.__Categories__ = self.__check_metadata__.Categories - self.__Tags__ = self.__check_metadata__.Tags - self.__DependsOn__ = self.__check_metadata__.DependsOn - self.__RelatedTo__ = self.__check_metadata__.RelatedTo - self.__Notes__ = self.__check_metadata__.Notes - self.__Compliance__ = self.__check_metadata__.Compliance + # Store it to validate them with Pydantic + data = Check_Metadata_Model.parse_file(metadata_file).dict() + # Calls parents init function + super().__init__(**data) - @property - def provider(self): - return self.__Provider__ - - @property - def checkID(self): - return self.__CheckID__ - - # @property - # def checkName(self): - # return self.__CheckName__ - - @property - def checkTitle(self): - return self.__CheckTitle__ - - # @property - # def checkAlias(self): - # return self.__CheckAlias__ - - @property - def checkType(self): - return self.__CheckType__ - - @property - def serviceName(self): - return self.__ServiceName__ - - @property - def subServiceName(self): - return self.__SubServiceName__ - - @property - def resourceIdTemplate(self): - return self.__ResourceIdTemplate__ - - @property - def severity(self): - return self.__Severity__ - - @property - def resourceType(self): - return self.__ResourceType__ - - @property - def description(self): - return self.__Description__ - - @property - def relatedUrl(self): - return self.__RelatedUrl__ - - @property - def risk(self): - return self.__Risk__ - - @property - def remediation(self): - return self.__Remediation__ - - @property - def categories(self): - return self.__Categories__ - - @property - def tags(self): - return self.__Tags__ - - @property - def dependsOn(self): - return self.__DependsOn__ - - @property - def relatedTo(self): - return self.__RelatedTo__ - - @property - def notes(self): - return self.__Notes__ - - @property - def compliance(self): - return self.__Compliance__ - - @property - def metadata(self): - return self.__check_metadata__ + def metadata(self) -> dict: + """Return the JSON representation of the check's metadata""" + return self.json() @abstractmethod def execute(self): - pass + """Execute the check's logic""" @dataclass class Check_Report: + """Contains the Check's finding information.""" + status: str region: str status_extended: str - check_metadata: dict + check_metadata: Check_Metadata_Model resource_id: str resource_details: str 
resource_tags: list resource_arn: str def __init__(self, metadata): - self.check_metadata = metadata + self.check_metadata = Check_Metadata_Model.parse_raw(metadata) self.status_extended = "" self.resource_details = "" self.resource_tags = [] self.resource_id = "" self.resource_arn = "" + + +# Testing Pending +def load_check_metadata(metadata_file: str) -> Check_Metadata_Model: + """load_check_metadata loads and parse a Check's metadata file""" + try: + check_metadata = Check_Metadata_Model.parse_file(metadata_file) + except ValidationError as error: + logger.critical(f"Metadata from {metadata_file} is not valid: {error}") + sys.exit() + else: + return check_metadata diff --git a/lib/outputs/models.py b/lib/outputs/models.py index dafe4a99..e7990fba 100644 --- a/lib/outputs/models.py +++ b/lib/outputs/models.py @@ -4,7 +4,7 @@ from typing import List, Optional from pydantic import BaseModel from config.config import timestamp -from lib.check.models import Check_Report, ComplianceItem, Remediation +from lib.check.models import Check_Report, Remediation from providers.aws.lib.audit_info.models import AWS_Organizations_Info @@ -25,7 +25,6 @@ class Check_Output_JSON(BaseModel): OrganizationsInfo: Optional[AWS_Organizations_Info] Region: str = "" CheckID: str - # CheckName: str CheckTitle: str CheckType: List[str] ServiceName: str @@ -46,7 +45,7 @@ class Check_Output_JSON(BaseModel): DependsOn: List[str] RelatedTo: List[str] Notes: str - Compliance: List[ComplianceItem] + # Compliance: List[ComplianceItem] # JSON ASFF Output @@ -92,6 +91,26 @@ class Check_Output_JSON_ASFF(BaseModel): Remediation: dict = None +class Check_Output_CSV_ENS_RD2022(BaseModel): + Provider: str + AccountId: str + Region: str + AssessmentDate: str + Requirements_Id: str + Requirements_Description: str + Requirements_Attributes_IdGrupoControl: str + Requirements_Attributes_Marco: str + Requirements_Attributes_Categoria: str + Requirements_Attributes_DescripcionControl: str + Requirements_Attributes_Nivel: str + Requirements_Attributes_Tipo: str + Requirements_Attributes_Dimensiones: str + Status: str + StatusExtended: str + ResourceId: str + CheckId: str + + @dataclass class Check_Output_CSV: assessment_start_time: str @@ -106,7 +125,6 @@ class Check_Output_CSV: account_tags: str region: str check_id: str - # check_name: str check_title: str check_type: str status: str @@ -132,7 +150,7 @@ class Check_Output_CSV: depends_on: str related_to: str notes: str - compliance: str + # compliance: str def get_csv_header(self): csv_header = [] @@ -160,7 +178,6 @@ class Check_Output_CSV: self.account_tags = organizations.account_details_tags self.region = report.region self.check_id = report.check_metadata.CheckID - # self.check_name = report.check_metadata.CheckName self.check_title = report.check_metadata.CheckTitle self.check_type = report.check_metadata.CheckType self.status = report.status @@ -198,7 +215,7 @@ class Check_Output_CSV: self.depends_on = self.__unroll_list__(report.check_metadata.DependsOn) self.related_to = self.__unroll_list__(report.check_metadata.RelatedTo) self.notes = report.check_metadata.Notes - self.compliance = self.__unroll_compliance__(report.check_metadata.Compliance) + # self.compliance = self.__unroll_compliance__(report.check_metadata.Compliance) def __unroll_list__(self, listed_items: list): unrolled_items = "" diff --git a/lib/outputs/outputs.py b/lib/outputs/outputs.py index 0a442d08..151f01ce 100644 --- a/lib/outputs/outputs.py +++ b/lib/outputs/outputs.py @@ -2,6 +2,8 @@ import json import os 
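With the refactor above, Check subclasses the pydantic Check_Metadata_Model, so metadata values become plain attributes (CheckID, Severity, ...) and Check_Report now takes the JSON string returned by check.metadata(). A minimal hypothetical check under the new model (the check name, region and resource are made up, and a matching .metadata.json file next to the module is assumed):

from lib.check.models import Check, Check_Report

class example_service_example_check(Check):
    def execute(self):
        findings = []
        # self.metadata() returns the metadata as JSON; Check_Report parses it.
        report = Check_Report(self.metadata())
        report.region = "eu-west-1"
        report.resource_id = "example-resource"
        report.status = "PASS"
        report.status_extended = "Example resource is compliant."
        findings.append(report)
        return findings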
import sys from csv import DictWriter +from io import TextIOWrapper +from typing import Any from colorama import Fore, Style from tabulate import tabulate @@ -12,12 +14,14 @@ from config.config import ( json_file_suffix, orange_color, prowler_version, + timestamp, timestamp_iso, timestamp_utc, ) from lib.logger import logger from lib.outputs.models import ( Check_Output_CSV, + Check_Output_CSV_ENS_RD2022, Check_Output_JSON, Check_Output_JSON_ASFF, Compliance, @@ -32,18 +36,17 @@ from providers.aws.lib.security_hub.security_hub import send_to_security_hub def report(check_findings, output_options, audit_info): + # Sort check findings check_findings.sort(key=lambda x: x.region) - csv_fields = [] - # check output options + + # Generate the required output files + # csv_fields = [] file_descriptors = {} if output_options.output_modes: - if "csv" in output_options.output_modes: - csv_fields = generate_csv_fields() - + # We have to create the required output files file_descriptors = fill_file_descriptors( output_options.output_modes, output_options.output_directory, - csv_fields, output_options.output_filename, ) @@ -70,7 +73,64 @@ def report(check_findings, output_options, audit_info): f"\t{color}{finding.status}{Style.RESET_ALL} {finding.region}: {finding.status_extended}" ) if file_descriptors: - # sending the finding to input options + if "ens_rd2022_aws" in output_options.output_modes: + # We have to retrieve all the check's compliance requirements + check_compliance = output_options.bulk_checks_metadata[ + finding.check_metadata.CheckID + ].Compliance + for compliance in check_compliance: + if ( + compliance.Framework == "ENS" + and compliance.Version == "RD2022" + ): + for requirement in compliance.Requirements: + requirement_description = requirement.Description + requirement_id = requirement.Id + for attribute in requirement.Attributes: + compliance_row = Check_Output_CSV_ENS_RD2022( + Provider=finding.check_metadata.Provider, + AccountId=audit_info.audited_account, + Region=finding.region, + AssessmentDate=timestamp.isoformat(), + Requirements_Id=requirement_id, + Requirements_Description=requirement_description, + Requirements_Attributes_IdGrupoControl=attribute.get( + "IdGrupoControl" + ), + Requirements_Attributes_Marco=attribute.get( + "Marco" + ), + Requirements_Attributes_Categoria=attribute.get( + "Categoria" + ), + Requirements_Attributes_DescripcionControl=attribute.get( + "DescripcionControl" + ), + Requirements_Attributes_Nivel=attribute.get( + "Nivel" + ), + Requirements_Attributes_Tipo=attribute.get( + "Tipo" + ), + Requirements_Attributes_Dimensiones=",".join( + attribute.get("Dimensiones") + ), + Status=finding.status, + StatusExtended=finding.status_extended, + ResourceId=finding.resource_id, + CheckId=finding.check_metadata.CheckID, + ) + + csv_header = generate_csv_fields( + Check_Output_CSV_ENS_RD2022 + ) + csv_writer = DictWriter( + file_descriptors["ens_rd2022_aws"], + fieldnames=csv_header, + delimiter=";", + ) + csv_writer.writerow(compliance_row.__dict__) + if "csv" in file_descriptors: finding_output = Check_Output_CSV( audit_info.audited_account, @@ -79,7 +139,9 @@ def report(check_findings, output_options, audit_info): audit_info.organizations_metadata, ) csv_writer = DictWriter( - file_descriptors["csv"], fieldnames=csv_fields, delimiter=";" + file_descriptors["csv"], + fieldnames=generate_csv_fields(Check_Output_CSV), + delimiter=";", ) csv_writer.writerow(finding_output.__dict__) @@ -117,65 +179,75 @@ def report(check_findings, output_options, audit_info): 
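The ENS rows written above keep their columns in sync with the Check_Output_CSV_ENS_RD2022 model through generate_csv_fields, which derives the header from the model's annotations. A small sketch of that relationship, printing just the header to stdout:

import sys
from csv import DictWriter

from lib.outputs.models import Check_Output_CSV_ENS_RD2022
from lib.outputs.outputs import generate_csv_fields

# Adding a field to the pydantic model automatically adds a CSV column.
fieldnames = generate_csv_fields(Check_Output_CSV_ENS_RD2022)
writer = DictWriter(sys.stdout, fieldnames=fieldnames, delimiter=";")
writer.writeheader()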
file_descriptors.get(file_descriptor).close() -def fill_file_descriptors(output_modes, output_directory, csv_fields, output_filename): +def initialize_file_descriptor( + filename: str, output_mode: str, format: Any = None +) -> TextIOWrapper: + """Open/Create the output file. If needed include headers or the required format""" + + if file_exists(filename): + file_descriptor = open_file( + filename, + "a", + ) + else: + file_descriptor = open_file( + filename, + "a", + ) + + if output_mode in ("csv", "ens_rd2022_aws"): + # Format is the class model of the CSV format to print the headers + csv_header = [x.upper() for x in generate_csv_fields(format)] + csv_writer = DictWriter( + file_descriptor, fieldnames=csv_header, delimiter=";" + ) + csv_writer.writeheader() + + if output_mode in ("json", "json-asff"): + file_descriptor = open_file( + filename, + "a", + ) + file_descriptor.write("[") + + return file_descriptor + + +def fill_file_descriptors(output_modes, output_directory, output_filename): file_descriptors = {} - for output_mode in output_modes: - if output_mode == "csv": - filename = f"{output_directory}/{output_filename}{csv_file_suffix}" - if file_exists(filename): - file_descriptor = open_file( - filename, - "a", + if output_modes: + for output_mode in output_modes: + if output_mode == "csv": + filename = f"{output_directory}/{output_filename}{csv_file_suffix}" + file_descriptor = initialize_file_descriptor( + filename, output_mode, Check_Output_CSV ) - else: - file_descriptor = open_file( - filename, - "a", - ) - csv_header = [x.upper() for x in csv_fields] - csv_writer = DictWriter( - file_descriptor, fieldnames=csv_header, delimiter=";" - ) - csv_writer.writeheader() + file_descriptors.update({output_mode: file_descriptor}) - file_descriptors.update({output_mode: file_descriptor}) + if output_mode == "json": + filename = f"{output_directory}/{output_filename}{json_file_suffix}" + file_descriptor = initialize_file_descriptor(filename, output_mode) + file_descriptors.update({output_mode: file_descriptor}) - if output_mode == "json": - filename = f"{output_directory}/{output_filename}{json_file_suffix}" - if file_exists(filename): - file_descriptor = open_file( - filename, - "a", + if output_mode == "json-asff": + filename = ( + f"{output_directory}/{output_filename}{json_asff_file_suffix}" ) - else: - file_descriptor = open_file( - filename, - "a", - ) - file_descriptor.write("[") + file_descriptor = initialize_file_descriptor(filename, output_mode) + file_descriptors.update({output_mode: file_descriptor}) - file_descriptors.update({output_mode: file_descriptor}) - - if output_mode == "json-asff": - filename = f"{output_directory}/{output_filename}{json_asff_file_suffix}" - if file_exists(filename): - file_descriptor = open_file( - filename, - "a", + if output_mode == "ens_rd2022_aws": + filename = f"{output_directory}/{output_filename}_ens_rd2022_aws{csv_file_suffix}" + file_descriptor = initialize_file_descriptor( + filename, output_mode, Check_Output_CSV_ENS_RD2022 ) - else: - file_descriptor = open_file( - filename, - "a", - ) - file_descriptor.write("[") - - file_descriptors.update({output_mode: file_descriptor}) + file_descriptors.update({output_mode: file_descriptor}) return file_descriptors -def set_report_color(status): +def set_report_color(status: str) -> str: + """Return the color for a give result status""" color = "" if status == "PASS": color = Fore.GREEN @@ -192,9 +264,10 @@ def set_report_color(status): return color -def generate_csv_fields(): +def 
generate_csv_fields(format: Any) -> list[str]: + """Generates the CSV headers for the given class""" csv_fields = [] - for field in Check_Output_CSV.__dict__["__annotations__"].keys(): + for field in format.__dict__.get("__annotations__").keys(): csv_fields.append(field) return csv_fields @@ -271,7 +344,9 @@ def close_json(output_filename, output_directory, mode): file_descriptor.write("]") file_descriptor.close() except Exception as error: - logger.critical(f"{error.__class__.__name__} -- {error}") + logger.critical( + f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}" + ) sys.exit() @@ -294,7 +369,9 @@ def send_to_s3_bucket( s3_client.upload_file(file_name, bucket_name, object_name) except Exception as error: - logger.critical(f"{error.__class__.__name__} -- {error}") + logger.critical( + f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}" + ) sys.exit() @@ -305,75 +382,76 @@ def display_summary_table( output_directory: str, ): try: - if findings: - current = { - "Service": "", - "Provider": "", - "Critical": 0, - "High": 0, - "Medium": 0, - "Low": 0, - } - findings_table = { - "Provider": [], - "Service": [], - "Status": [], - "Critical": [], - "High": [], - "Medium": [], - "Low": [], - } - pass_count = fail_count = 0 - for finding in findings: - # If new service and not first, add previous row - if ( - current["Service"] != finding.check_metadata.ServiceName - and current["Service"] - ): + current = { + "Service": "", + "Provider": "", + "Total": 0, + "Critical": 0, + "High": 0, + "Medium": 0, + "Low": 0, + } + findings_table = { + "Provider": [], + "Service": [], + "Status": [], + "Critical": [], + "High": [], + "Medium": [], + "Low": [], + } + pass_count = fail_count = 0 + for finding in findings: + # If new service and not first, add previous row + if ( + current["Service"] != finding.check_metadata.ServiceName + and current["Service"] + ): - add_service_to_table(findings_table, current) + add_service_to_table(findings_table, current) - current["Critical"] = current["High"] = current["Medium"] = current[ - "Low" - ] = 0 + current["Total"] = current["Critical"] = current["High"] = current[ + "Medium" + ] = current["Low"] = 0 - current["Service"] = finding.check_metadata.ServiceName - current["Provider"] = finding.check_metadata.Provider + current["Service"] = finding.check_metadata.ServiceName + current["Provider"] = finding.check_metadata.Provider - if finding.status == "PASS": - pass_count += 1 - elif finding.status == "FAIL": - fail_count += 1 - if finding.check_metadata.Severity == "critical": - current["Critical"] += 1 - elif finding.check_metadata.Severity == "high": - current["High"] += 1 - elif finding.check_metadata.Severity == "medium": - current["Medium"] += 1 - elif finding.check_metadata.Severity == "low": - current["Low"] += 1 + current["Total"] += 1 + if finding.status == "PASS": + pass_count += 1 + elif finding.status == "FAIL": + fail_count += 1 + if finding.check_metadata.Severity == "critical": + current["Critical"] += 1 + elif finding.check_metadata.Severity == "high": + current["High"] += 1 + elif finding.check_metadata.Severity == "medium": + current["Medium"] += 1 + elif finding.check_metadata.Severity == "low": + current["Low"] += 1 - # Add final service - add_service_to_table(findings_table, current) + # Add final service + add_service_to_table(findings_table, current) - print("\nOverview Results:") - overview_table = [ - [ - f"{Fore.RED}{round(fail_count/len(findings)*100, 2)}% ({fail_count}) 
Failed{Style.RESET_ALL}", - f"{Fore.GREEN}{round(pass_count/len(findings)*100, 2)}% ({pass_count}) Passed{Style.RESET_ALL}", - ] + print("\nOverview Results:") + overview_table = [ + [ + f"{Fore.RED}{round(fail_count/len(findings)*100, 2)}% ({fail_count}) Failed{Style.RESET_ALL}", + f"{Fore.GREEN}{round(pass_count/len(findings)*100, 2)}% ({pass_count}) Passed{Style.RESET_ALL}", ] - print(tabulate(overview_table, tablefmt="rounded_grid")) - print( - f"\nAccount {Fore.YELLOW}{audit_info.audited_account}{Style.RESET_ALL} Scan Results (severity columns are for fails only):" - ) - print(tabulate(findings_table, headers="keys", tablefmt="rounded_grid")) - print( - f"{Style.BRIGHT}* You only see here those services that contains resources.{Style.RESET_ALL}" - ) - print("\nDetailed results are in:") - print(f" - CSV: {output_directory}/{output_filename}.csv") - print(f" - JSON: {output_directory}/{output_filename}.json\n") + ] + print(tabulate(overview_table, tablefmt="rounded_grid")) + print( + f"\nAccount {Fore.YELLOW}{audit_info.audited_account}{Style.RESET_ALL} Scan Results (severity columns are for fails only):" + ) + print(tabulate(findings_table, headers="keys", tablefmt="rounded_grid")) + print( + f"{Style.BRIGHT}* You only see here those services that contains resources.{Style.RESET_ALL}" + ) + print("\nDetailed results are in:") + print(f" - CSV: {output_directory}/{output_filename}.csv") + print(f" - JSON: {output_directory}/{output_filename}.json\n") except Exception as error: logger.critical( @@ -389,9 +467,12 @@ def add_service_to_table(findings_table, current): or current["Medium"] > 0 or current["Low"] > 0 ): - current["Status"] = f"{Fore.RED}FAIL{Style.RESET_ALL}" + total_fails = ( + current["Critical"] + current["High"] + current["Medium"] + current["Low"] + ) + current["Status"] = f"{Fore.RED}FAIL ({total_fails}){Style.RESET_ALL}" else: - current["Status"] = f"{Fore.GREEN}PASS{Style.RESET_ALL}" + current["Status"] = f"{Fore.GREEN}PASS ({current['Total']}){Style.RESET_ALL}" findings_table["Provider"].append(current["Provider"]) findings_table["Service"].append(current["Service"]) findings_table["Status"].append(current["Status"]) @@ -403,3 +484,109 @@ def add_service_to_table(findings_table, current): f"{Fore.YELLOW}{current['Medium']}{Style.RESET_ALL}" ) findings_table["Low"].append(f"{Fore.BLUE}{current['Low']}{Style.RESET_ALL}") + + +def display_compliance_table( + findings: list, + bulk_checks_metadata: dict, + compliance_framework: str, + output_filename: str, + output_directory: str, +): + try: + if "ens_rd2022_aws" in compliance_framework: + marcos = {} + ens_compliance_table = { + "Proveedor": [], + "Marco/Categoria": [], + "Estado": [], + "PYTEC": [], + "Alto": [], + "Medio": [], + "Bajo": [], + } + pass_count = fail_count = 0 + for finding in findings: + check = bulk_checks_metadata[finding.check_metadata.CheckID] + check_compliances = check.Compliance + for compliance in check_compliances: + if ( + compliance.Framework == "ENS" + and compliance.Provider == "AWS" + and compliance.Version == "RD2022" + ): + for requirement in compliance.Requirements: + for attribute in requirement.Attributes: + marco_categoria = ( + f"{attribute['Marco']}/{attribute['Categoria']}" + ) + # Check if Marco/Categoria exists + if marco_categoria not in marcos: + marcos[marco_categoria] = { + "Estado": f"{Fore.GREEN}CUMPLE{Style.RESET_ALL}", + "Pytec": 0, + "Alto": 0, + "Medio": 0, + "Bajo": 0, + } + if finding.status == "FAIL": + fail_count += 1 + marcos[marco_categoria][ + "Estado" + ] = 
f"{Fore.RED}NO CUMPLE{Style.RESET_ALL}" + elif finding.status == "PASS": + pass_count += 1 + if attribute["Nivel"] == "pytec": + marcos[marco_categoria]["Pytec"] += 1 + elif attribute["Nivel"] == "alto": + marcos[marco_categoria]["Alto"] += 1 + elif attribute["Nivel"] == "medio": + marcos[marco_categoria]["Medio"] += 1 + elif attribute["Nivel"] == "bajo": + marcos[marco_categoria]["Bajo"] += 1 + + # Add results to table + for marco in marcos: + ens_compliance_table["Proveedor"].append("aws") + ens_compliance_table["Marco/Categoria"].append(marco) + ens_compliance_table["Estado"].append(marcos[marco]["Estado"]) + ens_compliance_table["PYTEC"].append( + f"{Fore.LIGHTRED_EX}{marcos[marco]['Pytec']}{Style.RESET_ALL}" + ) + ens_compliance_table["Alto"].append( + f"{Fore.RED}{marcos[marco]['Alto']}{Style.RESET_ALL}" + ) + ens_compliance_table["Medio"].append( + f"{Fore.YELLOW}{marcos[marco]['Medio']}{Style.RESET_ALL}" + ) + ens_compliance_table["Bajo"].append( + f"{Fore.BLUE}{marcos[marco]['Bajo']}{Style.RESET_ALL}" + ) + + print( + f"\nEstado de Cumplimiento de {Fore.YELLOW}ENS RD2022 - AWS{Style.RESET_ALL}:" + ) + overview_table = [ + [ + f"{Fore.RED}{round(fail_count/(fail_count+pass_count)*100, 2)}% ({fail_count}) NO CUMPLE{Style.RESET_ALL}", + f"{Fore.GREEN}{round(pass_count/(fail_count+pass_count)*100, 2)}% ({pass_count}) CUMPLE{Style.RESET_ALL}", + ] + ] + print(tabulate(overview_table, tablefmt="rounded_grid")) + print(f"\nResultados de {Fore.YELLOW}ENS RD2022 - AWS{Style.RESET_ALL}:") + print( + tabulate(ens_compliance_table, headers="keys", tablefmt="rounded_grid") + ) + print( + f"{Style.BRIGHT}* Solo aparece el Marco/Categoria que contiene resultados.{Style.RESET_ALL}" + ) + print("\nResultados detallados en:") + print( + f" - CSV: {output_directory}/{output_filename}_{compliance_framework[0]}.csv\n" + ) + + except Exception as error: + logger.critical( + f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}" + ) + sys.exit() diff --git a/lib/outputs/outputs_test.py b/lib/outputs/outputs_test.py index 4fbf2109..d9b203f0 100644 --- a/lib/outputs/outputs_test.py +++ b/lib/outputs/outputs_test.py @@ -2,6 +2,7 @@ import os from os import path, remove import boto3 +import pytest from colorama import Fore from moto import mock_s3 @@ -9,14 +10,15 @@ from config.config import ( csv_file_suffix, json_asff_file_suffix, json_file_suffix, + orange_color, output_file_timestamp, prowler_version, timestamp_iso, timestamp_utc, - orange_color, ) from lib.check.models import Check_Report, load_check_metadata from lib.outputs.models import ( + Check_Output_CSV, Check_Output_JSON, Check_Output_JSON_ASFF, Compliance, @@ -40,7 +42,7 @@ class Test_Outputs: def test_fill_file_descriptors(self): audited_account = "123456789012" output_directory = f"{os.path.dirname(os.path.realpath(__file__))}" - csv_fields = generate_csv_fields() + generate_csv_fields(Check_Output_CSV) test_output_modes = [ ["csv"], ["json"], @@ -98,7 +100,6 @@ class Test_Outputs: test_output_file_descriptors = fill_file_descriptors( output_mode_list, output_directory, - csv_fields, output_filename, ) for output_mode in output_mode_list: @@ -115,6 +116,17 @@ class Test_Outputs: for status in test_status: assert set_report_color(status) in test_colors + def test_set_report_color_invalid(self): + test_status = "INVALID" + + with pytest.raises(Exception) as exc: + set_report_color(test_status) + + assert "Invalid Report Status. 
Must be PASS, FAIL, ERROR or WARNING" in str( + exc.value + ) + assert exc.type == Exception + def test_generate_csv_fields(self): expected = [ "assessment_start_time", @@ -154,10 +166,10 @@ class Test_Outputs: "depends_on", "related_to", "notes", - "compliance", + # "compliance", ] - assert generate_csv_fields() == expected + assert generate_csv_fields(Check_Output_CSV) == expected def test_fill_json(self): input_audit_info = AWS_Audit_Info( @@ -177,7 +189,7 @@ class Test_Outputs: finding = Check_Report( load_check_metadata( f"{path.dirname(path.realpath(__file__))}/fixtures/metadata.json" - ) + ).json() ) finding.resource_details = "Test resource details" finding.resource_id = "test-resource" @@ -221,7 +233,7 @@ class Test_Outputs: finding = Check_Report( load_check_metadata( f"{path.dirname(path.realpath(__file__))}/fixtures/metadata.json" - ) + ).json() ) finding.resource_details = "Test resource details" finding.resource_id = "test-resource" diff --git a/lib/utils/utils.py b/lib/utils/utils.py index 9f0f485a..5646000c 100644 --- a/lib/utils/utils.py +++ b/lib/utils/utils.py @@ -12,7 +12,9 @@ def open_file(input_file: str, mode: str = "r") -> TextIOWrapper: try: f = open(input_file, mode) except Exception as e: - logger.critical(f"{input_file}: {e.__class__.__name__}") + logger.critical( + f"{input_file}: {e.__class__.__name__}[{e.__traceback__.tb_lineno}]" + ) sys.exit() else: return f @@ -23,7 +25,9 @@ def parse_json_file(input_file: TextIOWrapper) -> Any: try: json_file = json.load(input_file) except Exception as e: - logger.critical(f"{input_file.name}: {e.__class__.__name__}") + logger.critical( + f"{input_file.name}: {e.__class__.__name__}[{e.__traceback__.tb_lineno}]" + ) sys.exit() else: return json_file @@ -34,13 +38,12 @@ def file_exists(filename: str): try: exists_filename = exists(filename) except Exception as e: - logger.critical(f"{exists_filename.name}: {e.__class__.__name__}") - quit() + logger.critical( + f"{exists_filename.name}: {e.__class__.__name__}[{e.__traceback__.tb_lineno}]" + ) + sys.exit() else: - if exists_filename: - return True - else: - return False + return exists_filename # create sha512 hash for string diff --git a/providers/aws/services/accessanalyzer/accessanalyzer_enabled_without_findings/accessanalyzer_enabled_without_findings.metadata.json b/providers/aws/services/accessanalyzer/accessanalyzer_enabled_without_findings/accessanalyzer_enabled_without_findings.metadata.json index 8407dcbe..784b2343 100644 --- a/providers/aws/services/accessanalyzer/accessanalyzer_enabled_without_findings/accessanalyzer_enabled_without_findings.metadata.json +++ b/providers/aws/services/accessanalyzer/accessanalyzer_enabled_without_findings/accessanalyzer_enabled_without_findings.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "accessanalyzer_enabled_without_findings", "CheckTitle": "Check if IAM Access Analyzer is enabled without findings", - "CheckType": ["IAM"], + "CheckType": [ + "IAM" + ], "ServiceName": "accessanalyzer", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id", @@ -30,13 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Framework": "CIS-AWS", - "Version": "1.4", - "Control": [ "1.20" ], - "Group": [ "level1" ] - } - ] + "Notes": "" } diff --git a/providers/aws/services/accessanalyzer/accessanalyzer_enabled_without_findings/accessanalyzer_enabled_without_findings.py 
b/providers/aws/services/accessanalyzer/accessanalyzer_enabled_without_findings/accessanalyzer_enabled_without_findings.py index 5ce1efe9..a6a16073 100644 --- a/providers/aws/services/accessanalyzer/accessanalyzer_enabled_without_findings/accessanalyzer_enabled_without_findings.py +++ b/providers/aws/services/accessanalyzer/accessanalyzer_enabled_without_findings/accessanalyzer_enabled_without_findings.py @@ -8,7 +8,7 @@ class accessanalyzer_enabled_without_findings(Check): def execute(self): findings = [] for analyzer in accessanalyzer_client.analyzers: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = analyzer.region if analyzer.status == "ACTIVE": if analyzer.findings_count > 0: diff --git a/providers/aws/services/account/account_maintain_current_contact_details/account_maintain_current_contact_details.metadata.json b/providers/aws/services/account/account_maintain_current_contact_details/account_maintain_current_contact_details.metadata.json index baf2e6aa..683af51e 100644 --- a/providers/aws/services/account/account_maintain_current_contact_details/account_maintain_current_contact_details.metadata.json +++ b/providers/aws/services/account/account_maintain_current_contact_details/account_maintain_current_contact_details.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "account_maintain_current_contact_details", "CheckTitle": "Maintain current contact details.", - "CheckType": ["IAM"], + "CheckType": [ + "IAM" + ], "ServiceName": "account", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:access-recorder:region:account-id:recorder/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/account/account_maintain_current_contact_details/account_maintain_current_contact_details.py b/providers/aws/services/account/account_maintain_current_contact_details/account_maintain_current_contact_details.py index f35f02b1..b0dbbebd 100644 --- a/providers/aws/services/account/account_maintain_current_contact_details/account_maintain_current_contact_details.py +++ b/providers/aws/services/account/account_maintain_current_contact_details/account_maintain_current_contact_details.py @@ -6,7 +6,7 @@ from providers.aws.services.account.account_client import account_client class account_maintain_current_contact_details(Check): def execute(self): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = account_client.region report.resource_id = account_client.audited_account report.status = "INFO" diff --git a/providers/aws/services/account/account_security_contact_information_is_registered/account_security_contact_information_is_registered.metadata.json b/providers/aws/services/account/account_security_contact_information_is_registered/account_security_contact_information_is_registered.metadata.json index 8fbb6055..f5387f25 100644 --- a/providers/aws/services/account/account_security_contact_information_is_registered/account_security_contact_information_is_registered.metadata.json +++ b/providers/aws/services/account/account_security_contact_information_is_registered/account_security_contact_information_is_registered.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "account_security_contact_information_is_registered", "CheckTitle": "Ensure security contact information is registered.", - "CheckType": ["IAM"], + "CheckType": [ + "IAM" + ], "ServiceName": "account", "SubServiceName": "", 
"ResourceIdTemplate": "arn:partition:access-recorder:region:account-id:recorder/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/account/account_security_contact_information_is_registered/account_security_contact_information_is_registered.py b/providers/aws/services/account/account_security_contact_information_is_registered/account_security_contact_information_is_registered.py index e6e1eeeb..9e33182f 100644 --- a/providers/aws/services/account/account_security_contact_information_is_registered/account_security_contact_information_is_registered.py +++ b/providers/aws/services/account/account_security_contact_information_is_registered/account_security_contact_information_is_registered.py @@ -6,7 +6,7 @@ from providers.aws.services.account.account_client import account_client class account_security_contact_information_is_registered(Check): def execute(self): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = account_client.region report.resource_id = account_client.audited_account report.status = "INFO" diff --git a/providers/aws/services/account/account_security_questions_are_registered_in_the_aws_account/account_security_questions_are_registered_in_the_aws_account.metadata.json b/providers/aws/services/account/account_security_questions_are_registered_in_the_aws_account/account_security_questions_are_registered_in_the_aws_account.metadata.json index 936d59a6..ef6ddf2a 100644 --- a/providers/aws/services/account/account_security_questions_are_registered_in_the_aws_account/account_security_questions_are_registered_in_the_aws_account.metadata.json +++ b/providers/aws/services/account/account_security_questions_are_registered_in_the_aws_account/account_security_questions_are_registered_in_the_aws_account.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "account_security_questions_are_registered_in_the_aws_account", "CheckTitle": "Ensure security questions are registered in the AWS account.", - "CheckType": ["IAM"], + "CheckType": [ + "IAM" + ], "ServiceName": "account", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:access-recorder:region:account-id:recorder/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/account/account_security_questions_are_registered_in_the_aws_account/account_security_questions_are_registered_in_the_aws_account.py b/providers/aws/services/account/account_security_questions_are_registered_in_the_aws_account/account_security_questions_are_registered_in_the_aws_account.py index d068618b..c1b775ef 100644 --- a/providers/aws/services/account/account_security_questions_are_registered_in_the_aws_account/account_security_questions_are_registered_in_the_aws_account.py +++ b/providers/aws/services/account/account_security_questions_are_registered_in_the_aws_account/account_security_questions_are_registered_in_the_aws_account.py @@ -6,7 +6,7 @@ from providers.aws.services.account.account_client import account_client class account_security_questions_are_registered_in_the_aws_account(Check): def execute(self): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = account_client.region report.resource_id = account_client.audited_account report.status = "INFO" diff --git 
a/providers/aws/services/acm/acm_certificates_expiration_check/acm_certificates_expiration_check.metadata.json b/providers/aws/services/acm/acm_certificates_expiration_check/acm_certificates_expiration_check.metadata.json index 4b1f845b..40ebbb5e 100644 --- a/providers/aws/services/acm/acm_certificates_expiration_check/acm_certificates_expiration_check.metadata.json +++ b/providers/aws/services/acm/acm_certificates_expiration_check/acm_certificates_expiration_check.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "acm_certificates_expiration_check", "CheckTitle": "Check if ACM Certificates are about to expire in specific days or less", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "acm", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:acm:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/acm/acm_certificates_expiration_check/acm_certificates_expiration_check.py b/providers/aws/services/acm/acm_certificates_expiration_check/acm_certificates_expiration_check.py index 6c364078..54d3611f 100644 --- a/providers/aws/services/acm/acm_certificates_expiration_check/acm_certificates_expiration_check.py +++ b/providers/aws/services/acm/acm_certificates_expiration_check/acm_certificates_expiration_check.py @@ -8,7 +8,7 @@ class acm_certificates_expiration_check(Check): def execute(self): findings = [] for certificate in acm_client.certificates: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = certificate.region if certificate.expiration_days > DAYS_TO_EXPIRE_THRESHOLD: report.status = "PASS" diff --git a/providers/aws/services/acm/acm_certificates_transparency_logs_enabled/acm_certificates_transparency_logs_enabled.metadata.json b/providers/aws/services/acm/acm_certificates_transparency_logs_enabled/acm_certificates_transparency_logs_enabled.metadata.json index 20117024..da6fb5b2 100644 --- a/providers/aws/services/acm/acm_certificates_transparency_logs_enabled/acm_certificates_transparency_logs_enabled.metadata.json +++ b/providers/aws/services/acm/acm_certificates_transparency_logs_enabled/acm_certificates_transparency_logs_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "acm_certificates_transparency_logs_enabled", "CheckTitle": "Check if ACM certificates have Certificate Transparency logging enabled", - "CheckType": ["Logging and Monitoring"], + "CheckType": [ + "Logging and Monitoring" + ], "ServiceName": "acm", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:acm:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/acm/acm_certificates_transparency_logs_enabled/acm_certificates_transparency_logs_enabled.py b/providers/aws/services/acm/acm_certificates_transparency_logs_enabled/acm_certificates_transparency_logs_enabled.py index d2d5f323..3a8a2601 100644 --- a/providers/aws/services/acm/acm_certificates_transparency_logs_enabled/acm_certificates_transparency_logs_enabled.py +++ b/providers/aws/services/acm/acm_certificates_transparency_logs_enabled/acm_certificates_transparency_logs_enabled.py @@ -6,7 +6,7 @@ class acm_certificates_transparency_logs_enabled(Check): def execute(self): findings = [] for certificate in acm_client.certificates: - report = Check_Report(self.metadata) + report = 
Check_Report(self.metadata()) report.region = certificate.region if certificate.type == "IMPORTED": report.status = "PASS" diff --git a/providers/aws/services/apigateway/apigateway_authorizers_enabled/apigateway_authorizers_enabled.metadata.json b/providers/aws/services/apigateway/apigateway_authorizers_enabled/apigateway_authorizers_enabled.metadata.json index 99992bd3..14ddf201 100644 --- a/providers/aws/services/apigateway/apigateway_authorizers_enabled/apigateway_authorizers_enabled.metadata.json +++ b/providers/aws/services/apigateway/apigateway_authorizers_enabled/apigateway_authorizers_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "apigateway_authorizers_enabled", "CheckTitle": "Check if API Gateway has configured authorizers.", - "CheckType": ["IAM"], + "CheckType": [ + "IAM" + ], "ServiceName": "apigateway", "SubServiceName": "rest_api", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/apigateway/apigateway_authorizers_enabled/apigateway_authorizers_enabled.py b/providers/aws/services/apigateway/apigateway_authorizers_enabled/apigateway_authorizers_enabled.py index d098aebe..9a92d58b 100644 --- a/providers/aws/services/apigateway/apigateway_authorizers_enabled/apigateway_authorizers_enabled.py +++ b/providers/aws/services/apigateway/apigateway_authorizers_enabled/apigateway_authorizers_enabled.py @@ -6,7 +6,7 @@ class apigateway_authorizers_enabled(Check): def execute(self): findings = [] for rest_api in apigateway_client.rest_apis: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = rest_api.region if rest_api.authorizer: report.status = "PASS" diff --git a/providers/aws/services/apigateway/apigateway_client_certificate_enabled/apigateway_client_certificate_enabled.metadata.json b/providers/aws/services/apigateway/apigateway_client_certificate_enabled/apigateway_client_certificate_enabled.metadata.json index 88cf98cd..6fbff154 100644 --- a/providers/aws/services/apigateway/apigateway_client_certificate_enabled/apigateway_client_certificate_enabled.metadata.json +++ b/providers/aws/services/apigateway/apigateway_client_certificate_enabled/apigateway_client_certificate_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "apigateway_client_certificate_enabled", "CheckTitle": "Check if API Gateway has client certificate enabled to access your backend endpoint.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "apigateway", "SubServiceName": "rest_api", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/apigateway/apigateway_client_certificate_enabled/apigateway_client_certificate_enabled.py b/providers/aws/services/apigateway/apigateway_client_certificate_enabled/apigateway_client_certificate_enabled.py index 9ca51db5..2dcae406 100644 --- a/providers/aws/services/apigateway/apigateway_client_certificate_enabled/apigateway_client_certificate_enabled.py +++ b/providers/aws/services/apigateway/apigateway_client_certificate_enabled/apigateway_client_certificate_enabled.py @@ -7,7 +7,7 @@ class apigateway_client_certificate_enabled(Check): findings = [] for rest_api in apigateway_client.rest_apis: for stage in rest_api.stages: - 
report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) if stage.client_certificate: report.status = "PASS" report.status_extended = f"API Gateway {rest_api.name} ID {rest_api.id} in stage {stage.name} has client certificate enabled." diff --git a/providers/aws/services/apigateway/apigateway_endpoint_public/apigateway_endpoint_public.metadata.json b/providers/aws/services/apigateway/apigateway_endpoint_public/apigateway_endpoint_public.metadata.json index 27601cf3..d1809965 100644 --- a/providers/aws/services/apigateway/apigateway_endpoint_public/apigateway_endpoint_public.metadata.json +++ b/providers/aws/services/apigateway/apigateway_endpoint_public/apigateway_endpoint_public.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "apigateway_endpoint_public", "CheckTitle": "Check if API Gateway endpoint is public or private.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "apigateway", "SubServiceName": "rest_api", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/apigateway/apigateway_endpoint_public/apigateway_endpoint_public.py b/providers/aws/services/apigateway/apigateway_endpoint_public/apigateway_endpoint_public.py index a584ef62..0ee0b2a1 100644 --- a/providers/aws/services/apigateway/apigateway_endpoint_public/apigateway_endpoint_public.py +++ b/providers/aws/services/apigateway/apigateway_endpoint_public/apigateway_endpoint_public.py @@ -6,7 +6,7 @@ class apigateway_endpoint_public(Check): def execute(self): findings = [] for rest_api in apigateway_client.rest_apis: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = rest_api.region if rest_api.public_endpoint: report.status = "FAIL" diff --git a/providers/aws/services/apigateway/apigateway_logging_enabled/apigateway_logging_enabled.metadata.json b/providers/aws/services/apigateway/apigateway_logging_enabled/apigateway_logging_enabled.metadata.json index e28caf1e..d27ec640 100644 --- a/providers/aws/services/apigateway/apigateway_logging_enabled/apigateway_logging_enabled.metadata.json +++ b/providers/aws/services/apigateway/apigateway_logging_enabled/apigateway_logging_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "apigateway_logging_enabled", "CheckTitle": "Check if API Gateway has logging enabled.", - "CheckType": ["Logging and Monitoring"], + "CheckType": [ + "Logging and Monitoring" + ], "ServiceName": "apigateway", "SubServiceName": "rest_api", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/apigateway/apigateway_logging_enabled/apigateway_logging_enabled.py b/providers/aws/services/apigateway/apigateway_logging_enabled/apigateway_logging_enabled.py index 5fa38c6c..26e60ba3 100644 --- a/providers/aws/services/apigateway/apigateway_logging_enabled/apigateway_logging_enabled.py +++ b/providers/aws/services/apigateway/apigateway_logging_enabled/apigateway_logging_enabled.py @@ -6,7 +6,7 @@ class apigateway_logging_enabled(Check): def execute(self): findings = [] for rest_api in apigateway_client.rest_apis: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = rest_api.region for stage in 
rest_api.stages: if stage.logging: diff --git a/providers/aws/services/apigateway/apigateway_waf_acl_attached/apigateway_waf_acl_attached.metadata.json b/providers/aws/services/apigateway/apigateway_waf_acl_attached/apigateway_waf_acl_attached.metadata.json index beba42e9..835e3662 100644 --- a/providers/aws/services/apigateway/apigateway_waf_acl_attached/apigateway_waf_acl_attached.metadata.json +++ b/providers/aws/services/apigateway/apigateway_waf_acl_attached/apigateway_waf_acl_attached.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "apigateway_waf_acl_attached", "CheckTitle": "Check if API Gateway has a WAF ACL attached.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "apigateway", "SubServiceName": "rest_api", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/apigateway/apigateway_waf_acl_attached/apigateway_waf_acl_attached.py b/providers/aws/services/apigateway/apigateway_waf_acl_attached/apigateway_waf_acl_attached.py index 644c73a9..a1e82e04 100644 --- a/providers/aws/services/apigateway/apigateway_waf_acl_attached/apigateway_waf_acl_attached.py +++ b/providers/aws/services/apigateway/apigateway_waf_acl_attached/apigateway_waf_acl_attached.py @@ -6,7 +6,7 @@ class apigateway_waf_acl_attached(Check): def execute(self): findings = [] for rest_api in apigateway_client.rest_apis: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = rest_api.region for stage in rest_api.stages: if stage.waf: diff --git a/providers/aws/services/apigatewayv2/apigatewayv2_access_logging_enabled/apigatewayv2_access_logging_enabled.metadata.json b/providers/aws/services/apigatewayv2/apigatewayv2_access_logging_enabled/apigatewayv2_access_logging_enabled.metadata.json index e4736d6e..ffa7dfbf 100644 --- a/providers/aws/services/apigatewayv2/apigatewayv2_access_logging_enabled/apigatewayv2_access_logging_enabled.metadata.json +++ b/providers/aws/services/apigatewayv2/apigatewayv2_access_logging_enabled/apigatewayv2_access_logging_enabled.metadata.json @@ -1,8 +1,10 @@ { "Provider": "aws", - "CheckID": "apigatewayv2_authorizers_enabled", + "CheckID": "apigatewayv2_access_logging_enabled", "CheckTitle": "Check if API Gateway V2 has configured authorizers.", - "CheckType": ["IAM"], + "CheckType": [ + "IAM" + ], "ServiceName": "apigateway", "SubServiceName": "rest_api", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/apigatewayv2/apigatewayv2_access_logging_enabled/apigatewayv2_access_logging_enabled.py b/providers/aws/services/apigatewayv2/apigatewayv2_access_logging_enabled/apigatewayv2_access_logging_enabled.py index cc3c9441..63ea21d2 100644 --- a/providers/aws/services/apigatewayv2/apigatewayv2_access_logging_enabled/apigatewayv2_access_logging_enabled.py +++ b/providers/aws/services/apigatewayv2/apigatewayv2_access_logging_enabled/apigatewayv2_access_logging_enabled.py @@ -6,7 +6,7 @@ class apigatewayv2_access_logging_enabled(Check): def execute(self): findings = [] for api in apigatewayv2_client.apis: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = api.region for stage in api.stages: if stage.logging: diff 
--git a/providers/aws/services/apigatewayv2/apigatewayv2_authorizers_enabled/apigatewayv2_authorizers_enabled.metadata.json b/providers/aws/services/apigatewayv2/apigatewayv2_authorizers_enabled/apigatewayv2_authorizers_enabled.metadata.json index b73a3079..b00f8467 100644 --- a/providers/aws/services/apigatewayv2/apigatewayv2_authorizers_enabled/apigatewayv2_authorizers_enabled.metadata.json +++ b/providers/aws/services/apigatewayv2/apigatewayv2_authorizers_enabled/apigatewayv2_authorizers_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "apigatewayv2_authorizers_enabled", "CheckTitle": "Checks if API Gateway V2 has Access Logging enabled.", - "CheckType": ["Logging and Monitoring"], + "CheckType": [ + "Logging and Monitoring" + ], "ServiceName": "apigateway", "SubServiceName": "api", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/apigatewayv2/apigatewayv2_authorizers_enabled/apigatewayv2_authorizers_enabled.py b/providers/aws/services/apigatewayv2/apigatewayv2_authorizers_enabled/apigatewayv2_authorizers_enabled.py index ffb682b7..8aa82068 100644 --- a/providers/aws/services/apigatewayv2/apigatewayv2_authorizers_enabled/apigatewayv2_authorizers_enabled.py +++ b/providers/aws/services/apigatewayv2/apigatewayv2_authorizers_enabled/apigatewayv2_authorizers_enabled.py @@ -6,7 +6,7 @@ class apigatewayv2_authorizers_enabled(Check): def execute(self): findings = [] for api in apigatewayv2_client.apis: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = api.region if api.authorizer: report.status = "PASS" diff --git a/providers/aws/services/appstream/appstream_fleet_default_internet_access_disabled/appstream_fleet_default_internet_access_disabled.metadata.json b/providers/aws/services/appstream/appstream_fleet_default_internet_access_disabled/appstream_fleet_default_internet_access_disabled.metadata.json index 10b06dd3..1a8dbb33 100644 --- a/providers/aws/services/appstream/appstream_fleet_default_internet_access_disabled/appstream_fleet_default_internet_access_disabled.metadata.json +++ b/providers/aws/services/appstream/appstream_fleet_default_internet_access_disabled/appstream_fleet_default_internet_access_disabled.metadata.json @@ -1,36 +1,38 @@ { - "Provider": "aws", - "CheckID": "appstream_fleet_default_internet_access_disabled", - "CheckTitle": "Ensure default Internet Access from your Amazon AppStream fleet streaming instances should remain unchecked.", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards", "CIS AWS Foundations Benchmark"], - "ServiceName": "appstream", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id", - "Severity": "medium", - "ResourceType": "AppStream", - "Description": "Ensure default Internet Access from your Amazon AppStream fleet streaming instances should remain unchecked.", - "Risk": "Default Internet Access from your fleet streaming instances should be controlled using a NAT gateway in the VPC.", - "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Uncheck the default internet access for the AppStream Fleet.", - "Url": 
"https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html" - } + "Provider": "aws", + "CheckID": "appstream_fleet_default_internet_access_disabled", + "CheckTitle": "Ensure default Internet Access from your Amazon AppStream fleet streaming instances should remain unchecked.", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "appstream", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id", + "Severity": "medium", + "ResourceType": "AppStream", + "Description": "Ensure default Internet Access from your Amazon AppStream fleet streaming instances should remain unchecked.", + "Risk": "Default Internet Access from your fleet streaming instances should be controlled using a NAT gateway in the VPC.", + "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "Infrastructure Security", - "Compliance": [] - } - + "Recommendation": { + "Text": "Uncheck the default internet access for the AppStream Fleet.", + "Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "Infrastructure Security" +} diff --git a/providers/aws/services/appstream/appstream_fleet_default_internet_access_disabled/appstream_fleet_default_internet_access_disabled.py b/providers/aws/services/appstream/appstream_fleet_default_internet_access_disabled/appstream_fleet_default_internet_access_disabled.py index cddb2292..f6986b49 100644 --- a/providers/aws/services/appstream/appstream_fleet_default_internet_access_disabled/appstream_fleet_default_internet_access_disabled.py +++ b/providers/aws/services/appstream/appstream_fleet_default_internet_access_disabled/appstream_fleet_default_internet_access_disabled.py @@ -10,7 +10,7 @@ class appstream_fleet_default_internet_access_disabled(Check): """Execute the appstream_fleet_default_internet_access_disabled check""" findings = [] for fleet in appstream_client.fleets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = fleet.region report.resource_id = fleet.name report.resource_arn = fleet.arn diff --git a/providers/aws/services/appstream/appstream_fleet_maximum_session_duration/appstream_fleet_maximum_session_duration.metadata.json b/providers/aws/services/appstream/appstream_fleet_maximum_session_duration/appstream_fleet_maximum_session_duration.metadata.json index 136ff51d..043caa92 100644 --- a/providers/aws/services/appstream/appstream_fleet_maximum_session_duration/appstream_fleet_maximum_session_duration.metadata.json +++ b/providers/aws/services/appstream/appstream_fleet_maximum_session_duration/appstream_fleet_maximum_session_duration.metadata.json @@ -1,36 +1,36 @@ { - "Provider": "aws", - "CheckID": "appstream_fleet_maximum_session_duration", - "CheckTitle": "Ensure user maximum session duration is no longer than 10 hours.", - "CheckType": ["Infrastructure Security"], - "ServiceName": "appstream", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id", 
- "Severity": "medium", - "ResourceType": "AppStream", - "Description": "Ensure user maximum session duration is no longer than 10 hours.", - "Risk": "Having a session duration lasting longer than 10 hours should not be necessary and if running for any malicious reasons provides a greater time for usage than should be allowed.", - "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Change the Maximum session duration is set to 600 minutes or less for the AppStream Fleet.", - "Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html" - } + "Provider": "aws", + "CheckID": "appstream_fleet_maximum_session_duration", + "CheckTitle": "Ensure user maximum session duration is no longer than 10 hours.", + "CheckType": [ + "Infrastructure Security" + ], + "ServiceName": "appstream", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id", + "Severity": "medium", + "ResourceType": "AppStream", + "Description": "Ensure user maximum session duration is no longer than 10 hours.", + "Risk": "Having a session duration lasting longer than 10 hours should not be necessary and if running for any malicious reasons provides a greater time for usage than should be allowed.", + "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "Infrastructure Security", - "Compliance": [] - } - + "Recommendation": { + "Text": "Change the Maximum session duration is set to 600 minutes or less for the AppStream Fleet.", + "Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "Infrastructure Security" +} diff --git a/providers/aws/services/appstream/appstream_fleet_maximum_session_duration/appstream_fleet_maximum_session_duration.py b/providers/aws/services/appstream/appstream_fleet_maximum_session_duration/appstream_fleet_maximum_session_duration.py index 8ad1b241..a9025969 100644 --- a/providers/aws/services/appstream/appstream_fleet_maximum_session_duration/appstream_fleet_maximum_session_duration.py +++ b/providers/aws/services/appstream/appstream_fleet_maximum_session_duration/appstream_fleet_maximum_session_duration.py @@ -13,7 +13,7 @@ class appstream_fleet_maximum_session_duration(Check): """Execute the appstream_fleet_maximum_session_duration check""" findings = [] for fleet in appstream_client.fleets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = fleet.region report.resource_id = fleet.name report.resource_arn = fleet.arn diff --git a/providers/aws/services/appstream/appstream_fleet_session_disconnect_timeout/appstream_fleet_session_disconnect_timeout.metadata.json b/providers/aws/services/appstream/appstream_fleet_session_disconnect_timeout/appstream_fleet_session_disconnect_timeout.metadata.json index 837531bf..9c0d50dd 100644 --- 
a/providers/aws/services/appstream/appstream_fleet_session_disconnect_timeout/appstream_fleet_session_disconnect_timeout.metadata.json +++ b/providers/aws/services/appstream/appstream_fleet_session_disconnect_timeout/appstream_fleet_session_disconnect_timeout.metadata.json @@ -1,36 +1,38 @@ { - "Provider": "aws", - "CheckID": "appstream_fleet_session_disconnect_timeout", - "CheckTitle": "Ensure session disconnect timeout is set to 5 minutes or lesss.", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards", "CIS AWS Foundations Benchmark"], - "ServiceName": "appstream", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id", - "Severity": "medium", - "ResourceType": "AppStream", - "Description": "Ensure session disconnect timeout is set to 5 minutes or less", - "Risk": "Disconnect timeout in minutes, is the amount of of time that a streaming session remains active after users disconnect.", - "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Change the Disconnect timeout to 5 minutes or less for the AppStream Fleet.", - "Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html" - } + "Provider": "aws", + "CheckID": "appstream_fleet_session_disconnect_timeout", + "CheckTitle": "Ensure session disconnect timeout is set to 5 minutes or less.", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "appstream", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id", + "Severity": "medium", + "ResourceType": "AppStream", + "Description": "Ensure session disconnect timeout is set to 5 minutes or less", + "Risk": "Disconnect timeout, in minutes, is the amount of time that a streaming session remains active after users disconnect.", + "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "Infrastructure Security", - "Compliance": [] - } - + "Recommendation": { + "Text": "Change the Disconnect timeout to 5 minutes or less for the AppStream Fleet.", + "Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "Infrastructure Security" +} diff --git a/providers/aws/services/appstream/appstream_fleet_session_disconnect_timeout/appstream_fleet_session_disconnect_timeout.py b/providers/aws/services/appstream/appstream_fleet_session_disconnect_timeout/appstream_fleet_session_disconnect_timeout.py index 6fd01fd8..0e22cd14 100644 --- a/providers/aws/services/appstream/appstream_fleet_session_disconnect_timeout/appstream_fleet_session_disconnect_timeout.py +++ b/providers/aws/services/appstream/appstream_fleet_session_disconnect_timeout/appstream_fleet_session_disconnect_timeout.py @@ -13,7 +13,7 @@ class appstream_fleet_session_disconnect_timeout(Check): """Execute the
appstream_fleet_maximum_session_duration check""" findings = [] for fleet in appstream_client.fleets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = fleet.region report.resource_id = fleet.name report.resource_arn = fleet.arn diff --git a/providers/aws/services/appstream/appstream_fleet_session_idle_disconnect_timeout/appstream_fleet_session_idle_disconnect_timeout.metadata.json b/providers/aws/services/appstream/appstream_fleet_session_idle_disconnect_timeout/appstream_fleet_session_idle_disconnect_timeout.metadata.json index 961287ed..eea94dad 100644 --- a/providers/aws/services/appstream/appstream_fleet_session_idle_disconnect_timeout/appstream_fleet_session_idle_disconnect_timeout.metadata.json +++ b/providers/aws/services/appstream/appstream_fleet_session_idle_disconnect_timeout/appstream_fleet_session_idle_disconnect_timeout.metadata.json @@ -1,36 +1,38 @@ { - "Provider": "aws", - "CheckID": "appstream_fleet_session_disconnect_timeout", - "CheckTitle": "Ensure session idle disconnect timeout is set to 10 minutes or less.", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards", "CIS AWS Foundations Benchmark"], - "ServiceName": "appstream", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id", - "Severity": "medium", - "ResourceType": "AppStream", - "Description": "Ensure session idle disconnect timeout is set to 10 minutes or less.", - "Risk": "Idle disconnect timeout in minutes is the amount of time that users can be inactive before they are disconnected from their streaming session and the Disconnect timeout in minutes time begins.", - "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Change the session idle timeout to 10 minutes or less for the AppStream Fleet.", - "Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html" - } + "Provider": "aws", + "CheckID": "appstream_fleet_session_idle_disconnect_timeout", + "CheckTitle": "Ensure session idle disconnect timeout is set to 10 minutes or less.", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "appstream", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:appstream:region:account-id:fleet/resource-id", + "Severity": "medium", + "ResourceType": "AppStream", + "Description": "Ensure session idle disconnect timeout is set to 10 minutes or less.", + "Risk": "Idle disconnect timeout in minutes is the amount of time that users can be inactive before they are disconnected from their streaming session and the Disconnect timeout in minutes time begins.", + "RelatedUrl": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "Infrastructure Security", - "Compliance": [] - } - + "Recommendation": { + "Text": "Change the session idle timeout to 10 minutes or less for the AppStream Fleet.", + "Url": "https://docs.aws.amazon.com/appstream2/latest/developerguide/set-up-stacks-fleets.html" + } + }, + "Categories": 
[], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "Infrastructure Security" +} diff --git a/providers/aws/services/appstream/appstream_fleet_session_idle_disconnect_timeout/appstream_fleet_session_idle_disconnect_timeout.py b/providers/aws/services/appstream/appstream_fleet_session_idle_disconnect_timeout/appstream_fleet_session_idle_disconnect_timeout.py index b7ac6426..f8a48c23 100644 --- a/providers/aws/services/appstream/appstream_fleet_session_idle_disconnect_timeout/appstream_fleet_session_idle_disconnect_timeout.py +++ b/providers/aws/services/appstream/appstream_fleet_session_idle_disconnect_timeout/appstream_fleet_session_idle_disconnect_timeout.py @@ -15,7 +15,7 @@ class appstream_fleet_session_idle_disconnect_timeout(Check): """Execute the appstream_fleet_session_idle_disconnect_timeout check""" findings = [] for fleet in appstream_client.fleets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = fleet.region report.resource_id = fleet.name report.resource_arn = fleet.arn diff --git a/providers/aws/services/autoscaling/autoscaling_find_secrets_ec2_launch_configuration/autoscaling_find_secrets_ec2_launch_configuration.metadata.json b/providers/aws/services/autoscaling/autoscaling_find_secrets_ec2_launch_configuration/autoscaling_find_secrets_ec2_launch_configuration.metadata.json index 6b9bd267..92665d64 100644 --- a/providers/aws/services/autoscaling/autoscaling_find_secrets_ec2_launch_configuration/autoscaling_find_secrets_ec2_launch_configuration.metadata.json +++ b/providers/aws/services/autoscaling/autoscaling_find_secrets_ec2_launch_configuration/autoscaling_find_secrets_ec2_launch_configuration.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "autoscaling_find_secrets_ec2_launch_configuration", "CheckTitle": "Find secrets in EC2 Auto Scaling Launch Configuration", - "CheckType": ["IAM"], + "CheckType": [ + "IAM" + ], "ServiceName": "autoscaling", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id", @@ -30,7 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - ] + "Notes": "" } diff --git a/providers/aws/services/autoscaling/autoscaling_find_secrets_ec2_launch_configuration/autoscaling_find_secrets_ec2_launch_configuration.py b/providers/aws/services/autoscaling/autoscaling_find_secrets_ec2_launch_configuration/autoscaling_find_secrets_ec2_launch_configuration.py index c0d58d51..3c45a116 100644 --- a/providers/aws/services/autoscaling/autoscaling_find_secrets_ec2_launch_configuration/autoscaling_find_secrets_ec2_launch_configuration.py +++ b/providers/aws/services/autoscaling/autoscaling_find_secrets_ec2_launch_configuration/autoscaling_find_secrets_ec2_launch_configuration.py @@ -13,7 +13,7 @@ class autoscaling_find_secrets_ec2_launch_configuration(Check): def execute(self): findings = [] for configuration in autoscaling_client.launch_configurations: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = configuration.region report.resource_id = configuration.name report.resource_arn = configuration.arn diff --git a/providers/aws/services/awslambda/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled.metadata.json 
b/providers/aws/services/awslambda/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled.metadata.json index e9983eef..3089069b 100644 --- a/providers/aws/services/awslambda/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled.metadata.json +++ b/providers/aws/services/awslambda/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/awslambda/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled.py b/providers/aws/services/awslambda/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled.py index 7725482c..3bdbfd18 100644 --- a/providers/aws/services/awslambda/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled.py +++ b/providers/aws/services/awslambda/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled/awslambda_function_invoke_api_operations_cloudtrail_logging_enabled.py @@ -7,7 +7,7 @@ class awslambda_function_invoke_api_operations_cloudtrail_logging_enabled(Check) def execute(self): findings = [] for function in awslambda_client.functions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = function.region report.resource_id = function.name report.resource_arn = function.arn diff --git a/providers/aws/services/awslambda/awslambda_function_no_secrets_in_code/awslambda_function_no_secrets_in_code.metadata.json b/providers/aws/services/awslambda/awslambda_function_no_secrets_in_code/awslambda_function_no_secrets_in_code.metadata.json index d08368ac..2905481b 100644 --- a/providers/aws/services/awslambda/awslambda_function_no_secrets_in_code/awslambda_function_no_secrets_in_code.metadata.json +++ b/providers/aws/services/awslambda/awslambda_function_no_secrets_in_code/awslambda_function_no_secrets_in_code.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/awslambda/awslambda_function_no_secrets_in_code/awslambda_function_no_secrets_in_code.py b/providers/aws/services/awslambda/awslambda_function_no_secrets_in_code/awslambda_function_no_secrets_in_code.py index fc7fb8c3..1b899f63 100644 --- a/providers/aws/services/awslambda/awslambda_function_no_secrets_in_code/awslambda_function_no_secrets_in_code.py +++ b/providers/aws/services/awslambda/awslambda_function_no_secrets_in_code/awslambda_function_no_secrets_in_code.py @@ -10,7 +10,7 @@ class awslambda_function_no_secrets_in_code(Check): def execute(self): findings = [] for function in awslambda_client.functions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = function.region report.resource_id = function.name report.resource_arn = function.arn diff --git a/providers/aws/services/awslambda/awslambda_function_no_secrets_in_variables/awslambda_function_no_secrets_in_variables.metadata.json 
b/providers/aws/services/awslambda/awslambda_function_no_secrets_in_variables/awslambda_function_no_secrets_in_variables.metadata.json index 1f7c2a9d..f8bdbb39 100644 --- a/providers/aws/services/awslambda/awslambda_function_no_secrets_in_variables/awslambda_function_no_secrets_in_variables.metadata.json +++ b/providers/aws/services/awslambda/awslambda_function_no_secrets_in_variables/awslambda_function_no_secrets_in_variables.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/awslambda/awslambda_function_no_secrets_in_variables/awslambda_function_no_secrets_in_variables.py b/providers/aws/services/awslambda/awslambda_function_no_secrets_in_variables/awslambda_function_no_secrets_in_variables.py index 404f3996..d0da4de4 100644 --- a/providers/aws/services/awslambda/awslambda_function_no_secrets_in_variables/awslambda_function_no_secrets_in_variables.py +++ b/providers/aws/services/awslambda/awslambda_function_no_secrets_in_variables/awslambda_function_no_secrets_in_variables.py @@ -11,7 +11,7 @@ class awslambda_function_no_secrets_in_variables(Check): def execute(self): findings = [] for function in awslambda_client.functions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = function.region report.resource_id = function.name report.resource_arn = function.arn diff --git a/providers/aws/services/awslambda/awslambda_function_not_publicly_accessible/awslambda_function_not_publicly_accessible.metadata.json b/providers/aws/services/awslambda/awslambda_function_not_publicly_accessible/awslambda_function_not_publicly_accessible.metadata.json index 058142b6..f22cc625 100644 --- a/providers/aws/services/awslambda/awslambda_function_not_publicly_accessible/awslambda_function_not_publicly_accessible.metadata.json +++ b/providers/aws/services/awslambda/awslambda_function_not_publicly_accessible/awslambda_function_not_publicly_accessible.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/awslambda/awslambda_function_not_publicly_accessible/awslambda_function_not_publicly_accessible.py b/providers/aws/services/awslambda/awslambda_function_not_publicly_accessible/awslambda_function_not_publicly_accessible.py index 37eed32c..d188ed5a 100644 --- a/providers/aws/services/awslambda/awslambda_function_not_publicly_accessible/awslambda_function_not_publicly_accessible.py +++ b/providers/aws/services/awslambda/awslambda_function_not_publicly_accessible/awslambda_function_not_publicly_accessible.py @@ -6,7 +6,7 @@ class awslambda_function_not_publicly_accessible(Check): def execute(self): findings = [] for function in awslambda_client.functions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = function.region report.resource_id = function.name report.resource_arn = function.arn diff --git a/providers/aws/services/awslambda/awslambda_function_url_cors_policy/awslambda_function_url_cors_policy.metadata.json b/providers/aws/services/awslambda/awslambda_function_url_cors_policy/awslambda_function_url_cors_policy.metadata.json index a93bfcee..7259d1ef 100644 --- a/providers/aws/services/awslambda/awslambda_function_url_cors_policy/awslambda_function_url_cors_policy.metadata.json +++ 
b/providers/aws/services/awslambda/awslambda_function_url_cors_policy/awslambda_function_url_cors_policy.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/awslambda/awslambda_function_url_cors_policy/awslambda_function_url_cors_policy.py b/providers/aws/services/awslambda/awslambda_function_url_cors_policy/awslambda_function_url_cors_policy.py index 3d71907a..afbd23b5 100644 --- a/providers/aws/services/awslambda/awslambda_function_url_cors_policy/awslambda_function_url_cors_policy.py +++ b/providers/aws/services/awslambda/awslambda_function_url_cors_policy/awslambda_function_url_cors_policy.py @@ -6,7 +6,7 @@ class awslambda_function_url_cors_policy(Check): def execute(self): findings = [] for function in awslambda_client.functions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = function.region report.resource_id = function.name report.resource_arn = function.arn diff --git a/providers/aws/services/awslambda/awslambda_function_url_public/awslambda_function_url_public.metadata.json b/providers/aws/services/awslambda/awslambda_function_url_public/awslambda_function_url_public.metadata.json index 752b9cc0..a5912a42 100644 --- a/providers/aws/services/awslambda/awslambda_function_url_public/awslambda_function_url_public.metadata.json +++ b/providers/aws/services/awslambda/awslambda_function_url_public/awslambda_function_url_public.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/awslambda/awslambda_function_url_public/awslambda_function_url_public.py b/providers/aws/services/awslambda/awslambda_function_url_public/awslambda_function_url_public.py index b7dcc1ee..556b6c5a 100644 --- a/providers/aws/services/awslambda/awslambda_function_url_public/awslambda_function_url_public.py +++ b/providers/aws/services/awslambda/awslambda_function_url_public/awslambda_function_url_public.py @@ -8,7 +8,7 @@ class awslambda_function_url_public(Check): def execute(self): findings = [] for function in awslambda_client.functions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = function.region report.resource_id = function.name report.resource_arn = function.arn diff --git a/providers/aws/services/awslambda/awslambda_function_using_supported_runtimes/awslambda_function_using_supported_runtimes.metadata.json b/providers/aws/services/awslambda/awslambda_function_using_supported_runtimes/awslambda_function_using_supported_runtimes.metadata.json index f0409d1d..69b9195e 100644 --- a/providers/aws/services/awslambda/awslambda_function_using_supported_runtimes/awslambda_function_using_supported_runtimes.metadata.json +++ b/providers/aws/services/awslambda/awslambda_function_using_supported_runtimes/awslambda_function_using_supported_runtimes.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/awslambda/awslambda_function_using_supported_runtimes/awslambda_function_using_supported_runtimes.py b/providers/aws/services/awslambda/awslambda_function_using_supported_runtimes/awslambda_function_using_supported_runtimes.py index b80fa0a3..c53c5e7f 100644 --- 
a/providers/aws/services/awslambda/awslambda_function_using_supported_runtimes/awslambda_function_using_supported_runtimes.py +++ b/providers/aws/services/awslambda/awslambda_function_using_supported_runtimes/awslambda_function_using_supported_runtimes.py @@ -7,7 +7,7 @@ class awslambda_function_using_supported_runtimes(Check): def execute(self): findings = [] for function in awslambda_client.functions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = function.region report.resource_id = function.name report.resource_arn = function.arn diff --git a/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.metadata.json b/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.metadata.json index d40a52fc..04bf9d91 100644 --- a/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.metadata.json +++ b/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Infrastructure Protection", - "Compliance": [] + "Notes": "Infrastructure Protection" } diff --git a/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.py b/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.py index f2d6331c..ef6e73bd 100644 --- a/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.py +++ b/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.py @@ -17,7 +17,7 @@ class cloudformation_outputs_find_secrets(Check): """Execute the cloudformation_outputs_find_secrets check""" findings = [] for stack in cloudformation_client.stacks: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = stack.region report.resource_id = stack.name report.resource_arn = stack.arn diff --git a/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.metadata.json b/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.metadata.json index 908cb1fe..22cdf8ca 100644 --- a/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.metadata.json +++ b/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Infrastructure Protection", - "Compliance": [] + "Notes": "Infrastructure Protection" } diff --git a/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.py b/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.py index 606d8277..f8ddd3af 100644 --- a/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.py +++ 
b/providers/aws/services/cloudformation/cloudformation_stacks_termination_protection_enabled/cloudformation_stacks_termination_protection_enabled.py @@ -12,7 +12,7 @@ class cloudformation_stacks_termination_protection_enabled(Check): findings = [] for stack in cloudformation_client.stacks: if not stack.is_nested_stack: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = stack.region report.resource_id = stack.name report.resource_arn = stack.arn diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_field_level_encryption_enabled/cloudfront_distributions_field_level_encryption_enabled.metadata.json b/providers/aws/services/cloudfront/cloudfront_distributions_field_level_encryption_enabled/cloudfront_distributions_field_level_encryption_enabled.metadata.json index 6dfa1824..af417821 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_field_level_encryption_enabled/cloudfront_distributions_field_level_encryption_enabled.metadata.json +++ b/providers/aws/services/cloudfront/cloudfront_distributions_field_level_encryption_enabled/cloudfront_distributions_field_level_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudfront_distributions_field_level_encryption_enabled", "CheckTitle": "Check if CloudFront distributions have Field Level Encryption enabled.", - "CheckType": [""], + "CheckType": [ + "" + ], "ServiceName": "cloudfront", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_field_level_encryption_enabled/cloudfront_distributions_field_level_encryption_enabled.py b/providers/aws/services/cloudfront/cloudfront_distributions_field_level_encryption_enabled/cloudfront_distributions_field_level_encryption_enabled.py index 8abc1378..0ea759af 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_field_level_encryption_enabled/cloudfront_distributions_field_level_encryption_enabled.py +++ b/providers/aws/services/cloudfront/cloudfront_distributions_field_level_encryption_enabled/cloudfront_distributions_field_level_encryption_enabled.py @@ -6,7 +6,7 @@ class cloudfront_distributions_field_level_encryption_enabled(Check): def execute(self): findings = [] for distribution in cloudfront_client.distributions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = distribution.region report.resource_arn = distribution.arn report.resource_id = distribution.id diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_geo_restrictions_enabled/cloudfront_distributions_geo_restrictions_enabled.metadata.json b/providers/aws/services/cloudfront/cloudfront_distributions_geo_restrictions_enabled/cloudfront_distributions_geo_restrictions_enabled.metadata.json index 330f8321..9925b660 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_geo_restrictions_enabled/cloudfront_distributions_geo_restrictions_enabled.metadata.json +++ b/providers/aws/services/cloudfront/cloudfront_distributions_geo_restrictions_enabled/cloudfront_distributions_geo_restrictions_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudfront_distributions_geo_restrictions_enabled", "CheckTitle": "Check if Geo restrictions are enabled in 
CloudFront distributions.", - "CheckType": [""], + "CheckType": [ + "" + ], "ServiceName": "cloudfront", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Infrastructure Security", - "Compliance": [] + "Notes": "Infrastructure Security" } diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_geo_restrictions_enabled/cloudfront_distributions_geo_restrictions_enabled.py b/providers/aws/services/cloudfront/cloudfront_distributions_geo_restrictions_enabled/cloudfront_distributions_geo_restrictions_enabled.py index 2d229d0d..619de02e 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_geo_restrictions_enabled/cloudfront_distributions_geo_restrictions_enabled.py +++ b/providers/aws/services/cloudfront/cloudfront_distributions_geo_restrictions_enabled/cloudfront_distributions_geo_restrictions_enabled.py @@ -7,7 +7,7 @@ class cloudfront_distributions_geo_restrictions_enabled(Check): def execute(self): findings = [] for distribution in cloudfront_client.distributions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = distribution.region report.resource_arn = distribution.arn report.resource_id = distribution.id diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_https_enabled/cloudfront_distributions_https_enabled.metadata.json b/providers/aws/services/cloudfront/cloudfront_distributions_https_enabled/cloudfront_distributions_https_enabled.metadata.json index 7beb5759..3ab8db1a 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_https_enabled/cloudfront_distributions_https_enabled.metadata.json +++ b/providers/aws/services/cloudfront/cloudfront_distributions_https_enabled/cloudfront_distributions_https_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudfront_distributions_https_enabled", "CheckTitle": "Check if CloudFront distributions are set to HTTPS.", - "CheckType": [""], + "CheckType": [ + "" + ], "ServiceName": "cloudfront", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_https_enabled/cloudfront_distributions_https_enabled.py b/providers/aws/services/cloudfront/cloudfront_distributions_https_enabled/cloudfront_distributions_https_enabled.py index e66a813e..272439d3 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_https_enabled/cloudfront_distributions_https_enabled.py +++ b/providers/aws/services/cloudfront/cloudfront_distributions_https_enabled/cloudfront_distributions_https_enabled.py @@ -7,7 +7,7 @@ class cloudfront_distributions_https_enabled(Check): def execute(self): findings = [] for distribution in cloudfront_client.distributions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = distribution.region report.resource_arn = distribution.arn report.resource_id = distribution.id diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_logging_enabled/cloudfront_distributions_logging_enabled.metadata.json b/providers/aws/services/cloudfront/cloudfront_distributions_logging_enabled/cloudfront_distributions_logging_enabled.metadata.json index 3e9a5085..8313b488 100644 --- 
a/providers/aws/services/cloudfront/cloudfront_distributions_logging_enabled/cloudfront_distributions_logging_enabled.metadata.json +++ b/providers/aws/services/cloudfront/cloudfront_distributions_logging_enabled/cloudfront_distributions_logging_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudfront_distributions_logging_enabled", "CheckTitle": "Check if CloudFront distributions have logging enabled.", - "CheckType": [""], + "CheckType": [ + "" + ], "ServiceName": "cloudfront", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_logging_enabled/cloudfront_distributions_logging_enabled.py b/providers/aws/services/cloudfront/cloudfront_distributions_logging_enabled/cloudfront_distributions_logging_enabled.py index f4589aac..bbec6136 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_logging_enabled/cloudfront_distributions_logging_enabled.py +++ b/providers/aws/services/cloudfront/cloudfront_distributions_logging_enabled/cloudfront_distributions_logging_enabled.py @@ -6,7 +6,7 @@ class cloudfront_distributions_logging_enabled(Check): def execute(self): findings = [] for distribution in cloudfront_client.distributions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = distribution.region report.resource_arn = distribution.arn report.resource_id = distribution.id diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_using_deprecated_ssl_protocols/cloudfront_distributions_using_deprecated_ssl_protocols.metadata.json b/providers/aws/services/cloudfront/cloudfront_distributions_using_deprecated_ssl_protocols/cloudfront_distributions_using_deprecated_ssl_protocols.metadata.json index 55456f1b..f09d7d07 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_using_deprecated_ssl_protocols/cloudfront_distributions_using_deprecated_ssl_protocols.metadata.json +++ b/providers/aws/services/cloudfront/cloudfront_distributions_using_deprecated_ssl_protocols/cloudfront_distributions_using_deprecated_ssl_protocols.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudfront_distributions_using_deprecated_ssl_protocols", "CheckTitle": "Check if CloudFront distributions are using deprecated SSL protocols.", - "CheckType": [""], + "CheckType": [ + "" + ], "ServiceName": "cloudfront", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_using_deprecated_ssl_protocols/cloudfront_distributions_using_deprecated_ssl_protocols.py b/providers/aws/services/cloudfront/cloudfront_distributions_using_deprecated_ssl_protocols/cloudfront_distributions_using_deprecated_ssl_protocols.py index b7f8a65a..b4baa046 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_using_deprecated_ssl_protocols/cloudfront_distributions_using_deprecated_ssl_protocols.py +++ b/providers/aws/services/cloudfront/cloudfront_distributions_using_deprecated_ssl_protocols/cloudfront_distributions_using_deprecated_ssl_protocols.py @@ -7,7 +7,7 @@ class 
cloudfront_distributions_using_deprecated_ssl_protocols(Check): def execute(self): findings = [] for distribution in cloudfront_client.distributions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = distribution.region report.resource_arn = distribution.arn report.resource_id = distribution.id diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_using_waf/cloudfront_distributions_using_waf.metadata.json b/providers/aws/services/cloudfront/cloudfront_distributions_using_waf/cloudfront_distributions_using_waf.metadata.json index 95375455..4fc5b1cd 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_using_waf/cloudfront_distributions_using_waf.metadata.json +++ b/providers/aws/services/cloudfront/cloudfront_distributions_using_waf/cloudfront_distributions_using_waf.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudfront_distributions_using_waf", "CheckTitle": "Check if CloudFront distributions are using WAF.", - "CheckType": ["IAM"], + "CheckType": [ + "IAM" + ], "ServiceName": "cloudfront", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudfront:region:account-id:distribution/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/cloudfront/cloudfront_distributions_using_waf/cloudfront_distributions_using_waf.py b/providers/aws/services/cloudfront/cloudfront_distributions_using_waf/cloudfront_distributions_using_waf.py index 4192350f..6bd52882 100644 --- a/providers/aws/services/cloudfront/cloudfront_distributions_using_waf/cloudfront_distributions_using_waf.py +++ b/providers/aws/services/cloudfront/cloudfront_distributions_using_waf/cloudfront_distributions_using_waf.py @@ -6,7 +6,7 @@ class cloudfront_distributions_using_waf(Check): def execute(self): findings = [] for distribution in cloudfront_client.distributions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = distribution.region report.resource_arn = distribution.arn report.resource_id = distribution.id diff --git a/providers/aws/services/cloudtrail/cloudtrail_cloudwatch_logging_enabled/cloudtrail_cloudwatch_logging_enabled.metadata.json b/providers/aws/services/cloudtrail/cloudtrail_cloudwatch_logging_enabled/cloudtrail_cloudwatch_logging_enabled.metadata.json index 85d14f34..081cf558 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_cloudwatch_logging_enabled/cloudtrail_cloudwatch_logging_enabled.metadata.json +++ b/providers/aws/services/cloudtrail/cloudtrail_cloudwatch_logging_enabled/cloudtrail_cloudwatch_logging_enabled.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "cloudtrail_cloudwatch_logging_enabled", - "CheckTitle": "Ensure CloudTrail trails are integrated with CloudWatch Logs", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], - "ServiceName": "cloudtrail", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "low", - "ResourceType": "AwsCloudTrailTrail", - "Description": "Ensure CloudTrail trails are integrated with CloudWatch Logs", - "Risk": "Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user; API; resource; and IP address; and provides opportunity to establish alarms and notifications for anomalous or sensitivity 
account activity.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws cloudtrail update-trail --name --cloudwatch-logs-log-group- arn --cloudwatch-logs-role-arn ", - "NativeIaC": "", - "Other": "https://docs.bridgecrew.io/docs/logging_4#aws-console", - "Terraform": "" - }, - "Recommendation": { - "Text": "Validate that the trails in CloudTrail has an arn set in the CloudWatchLogsLogGroupArn property.", - "Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html" - } + "Provider": "aws", + "CheckID": "cloudtrail_cloudwatch_logging_enabled", + "CheckTitle": "Ensure CloudTrail trails are integrated with CloudWatch Logs", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "cloudtrail", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "low", + "ResourceType": "AwsCloudTrailTrail", + "Description": "Ensure CloudTrail trails are integrated with CloudWatch Logs", + "Risk": "Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user; API; resource; and IP address; and provides opportunity to establish alarms and notifications for anomalous or sensitivity account activity.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws cloudtrail update-trail --name --cloudwatch-logs-log-group- arn --cloudwatch-logs-role-arn ", + "NativeIaC": "", + "Other": "https://docs.bridgecrew.io/docs/logging_4#aws-console", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Recommendation": { + "Text": "Validate that the trails in CloudTrail has an arn set in the CloudWatchLogsLogGroupArn property.", + "Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" } diff --git a/providers/aws/services/cloudtrail/cloudtrail_cloudwatch_logging_enabled/cloudtrail_cloudwatch_logging_enabled.py b/providers/aws/services/cloudtrail/cloudtrail_cloudwatch_logging_enabled/cloudtrail_cloudwatch_logging_enabled.py index f986b6c9..776b6fcb 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_cloudwatch_logging_enabled/cloudtrail_cloudwatch_logging_enabled.py +++ b/providers/aws/services/cloudtrail/cloudtrail_cloudwatch_logging_enabled/cloudtrail_cloudwatch_logging_enabled.py @@ -11,7 +11,7 @@ class cloudtrail_cloudwatch_logging_enabled(Check): findings = [] for trail in cloudtrail_client.trails: if trail.name: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = trail.region report.resource_id = trail.name report.resource_arn = trail.arn diff --git a/providers/aws/services/cloudtrail/cloudtrail_kms_encryption_enabled/cloudtrail_kms_encryption_enabled.metadata.json b/providers/aws/services/cloudtrail/cloudtrail_kms_encryption_enabled/cloudtrail_kms_encryption_enabled.metadata.json index a947ff99..f6e5344a 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_kms_encryption_enabled/cloudtrail_kms_encryption_enabled.metadata.json +++ b/providers/aws/services/cloudtrail/cloudtrail_kms_encryption_enabled/cloudtrail_kms_encryption_enabled.metadata.json @@ -1,35 +1,38 @@ { - 
"Provider": "aws", - "CheckID": "cloudtrail_kms_encryption_enabled", - "CheckTitle": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], - "ServiceName": "cloudtrail", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsCloudTrailTrail", - "Description": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs", - "Risk": "By default; the log files delivered by CloudTrail to your bucket are encrypted by Amazon server-side encryption with Amazon S3-managed encryption keys (SSE-S3). To provide a security layer that is directly manageable; you can instead use server-side encryption with AWS KMS–managed keys (SSE-KMS) for your CloudTrail log files.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws cloudtrail update-trail --name --kms-id aws kms put-key-policy --key-id --policy ", - "NativeIaC": "https://docs.bridgecrew.io/docs/logging_7#fix---buildtime", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "This approach has the following advantages: You can create and manage the CMK encryption keys yourself. You can use a single CMK to encrypt and decrypt log files for multiple accounts across all regions. You have control over who can use your key for encrypting and decrypting CloudTrail log files. You can assign permissions for the key to the users. You have enhanced security.", - "Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html" - } + "Provider": "aws", + "CheckID": "cloudtrail_kms_encryption_enabled", + "CheckTitle": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "cloudtrail", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsCloudTrailTrail", + "Description": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs", + "Risk": "By default; the log files delivered by CloudTrail to your bucket are encrypted by Amazon server-side encryption with Amazon S3-managed encryption keys (SSE-S3). To provide a security layer that is directly manageable; you can instead use server-side encryption with AWS KMS–managed keys (SSE-KMS) for your CloudTrail log files.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws cloudtrail update-trail --name --kms-id aws kms put-key-policy --key-id --policy ", + "NativeIaC": "https://docs.bridgecrew.io/docs/logging_7#fix---buildtime", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "This approach has the following advantages: You can create and manage the CMK encryption keys yourself. You can use a single CMK to encrypt and decrypt log files for multiple accounts across all regions. You have control over who can use your key for encrypting and decrypting CloudTrail log files. You can assign permissions for the key to the users. 
You have enhanced security.", + "Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/cloudtrail/cloudtrail_kms_encryption_enabled/cloudtrail_kms_encryption_enabled.py b/providers/aws/services/cloudtrail/cloudtrail_kms_encryption_enabled/cloudtrail_kms_encryption_enabled.py index d5eb1232..8a789da5 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_kms_encryption_enabled/cloudtrail_kms_encryption_enabled.py +++ b/providers/aws/services/cloudtrail/cloudtrail_kms_encryption_enabled/cloudtrail_kms_encryption_enabled.py @@ -7,7 +7,7 @@ class cloudtrail_kms_encryption_enabled(Check): findings = [] for trail in cloudtrail_client.trails: if trail.name: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = trail.region report.resource_id = trail.name report.resource_arn = trail.arn diff --git a/providers/aws/services/cloudtrail/cloudtrail_log_file_validation_enabled/cloudtrail_log_file_validation_enabled.metadata.json b/providers/aws/services/cloudtrail/cloudtrail_log_file_validation_enabled/cloudtrail_log_file_validation_enabled.metadata.json index a67174b6..6ed6f775 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_log_file_validation_enabled/cloudtrail_log_file_validation_enabled.metadata.json +++ b/providers/aws/services/cloudtrail/cloudtrail_log_file_validation_enabled/cloudtrail_log_file_validation_enabled.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "cloudtrail_log_file_validation_enabled", - "CheckTitle": "Ensure CloudTrail log file validation is enabled", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], - "ServiceName": "cloudtrail", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsCloudTrailTrail", - "Description": "Ensure CloudTrail log file validation is enabled", - "Risk": "Enabling log file validation will provide additional integrity checking of CloudTrail logs. ", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws cloudtrail update-trail --name --enable-log-file-validation", - "NativeIaC": "https://docs.bridgecrew.io/docs/logging_2#cloudformation", - "Other": "", - "Terraform": "https://docs.bridgecrew.io/docs/logging_2#terraform" - }, - "Recommendation": { - "Text": "Ensure LogFileValidationEnabled is set to true for each trail.", - "Url": "http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-filevalidation-enabling.html" - } + "Provider": "aws", + "CheckID": "cloudtrail_log_file_validation_enabled", + "CheckTitle": "Ensure CloudTrail log file validation is enabled", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "cloudtrail", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsCloudTrailTrail", + "Description": "Ensure CloudTrail log file validation is enabled", + "Risk": "Enabling log file validation will provide additional integrity checking of CloudTrail logs. 
", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws cloudtrail update-trail --name --enable-log-file-validation", + "NativeIaC": "https://docs.bridgecrew.io/docs/logging_2#cloudformation", + "Other": "", + "Terraform": "https://docs.bridgecrew.io/docs/logging_2#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Ensure LogFileValidationEnabled is set to true for each trail.", + "Url": "http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-filevalidation-enabling.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/cloudtrail/cloudtrail_log_file_validation_enabled/cloudtrail_log_file_validation_enabled.py b/providers/aws/services/cloudtrail/cloudtrail_log_file_validation_enabled/cloudtrail_log_file_validation_enabled.py index bf9765b1..d441311c 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_log_file_validation_enabled/cloudtrail_log_file_validation_enabled.py +++ b/providers/aws/services/cloudtrail/cloudtrail_log_file_validation_enabled/cloudtrail_log_file_validation_enabled.py @@ -7,7 +7,7 @@ class cloudtrail_log_file_validation_enabled(Check): findings = [] for trail in cloudtrail_client.trails: if trail.name: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = trail.region report.resource_id = trail.name report.resource_arn = trail.arn diff --git a/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_access_logging_enabled/cloudtrail_logs_s3_bucket_access_logging_enabled.metadata.json b/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_access_logging_enabled/cloudtrail_logs_s3_bucket_access_logging_enabled.metadata.json index 75f1ab45..419b8874 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_access_logging_enabled/cloudtrail_logs_s3_bucket_access_logging_enabled.metadata.json +++ b/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_access_logging_enabled/cloudtrail_logs_s3_bucket_access_logging_enabled.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "cloudtrail_logs_s3_bucket_access_logging_enabled", - "CheckTitle": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], - "ServiceName": "cloudtrail", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsCloudTrailTrail", - "Description": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket", - "Risk": "Server access logs can assist you in security and access audits; help you learn about your customer base; and understand your Amazon S3 bill.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "https://docs.bridgecrew.io/docs/logging_6#aws-console", - "Terraform": "" - }, - "Recommendation": { - "Text": "Ensure that S3 buckets have Logging enabled. CloudTrail data events can be used in place of S3 bucket logging. 
If that is the case; this finding can be considered a false positive.", - "Url": "https://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html" - } + "Provider": "aws", + "CheckID": "cloudtrail_logs_s3_bucket_access_logging_enabled", + "CheckTitle": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "cloudtrail", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsCloudTrailTrail", + "Description": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket", + "Risk": "Server access logs can assist you in security and access audits; help you learn about your customer base; and understand your Amazon S3 bill.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://docs.bridgecrew.io/docs/logging_6#aws-console", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Ensure that S3 buckets have Logging enabled. CloudTrail data events can be used in place of S3 bucket logging. If that is the case; this finding can be considered a false positive.", + "Url": "https://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_access_logging_enabled/cloudtrail_logs_s3_bucket_access_logging_enabled.py b/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_access_logging_enabled/cloudtrail_logs_s3_bucket_access_logging_enabled.py index e1e4f747..0349bd9e 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_access_logging_enabled/cloudtrail_logs_s3_bucket_access_logging_enabled.py +++ b/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_access_logging_enabled/cloudtrail_logs_s3_bucket_access_logging_enabled.py @@ -9,7 +9,7 @@ class cloudtrail_logs_s3_bucket_access_logging_enabled(Check): for trail in cloudtrail_client.trails: if trail.name: trail_bucket = trail.s3_bucket - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = trail.region report.resource_id = trail.name report.resource_arn = trail.arn diff --git a/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_is_not_publicly_accessible/cloudtrail_logs_s3_bucket_is_not_publicly_accessible.metadata.json b/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_is_not_publicly_accessible/cloudtrail_logs_s3_bucket_is_not_publicly_accessible.metadata.json index 0391ddfd..5670d2f5 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_is_not_publicly_accessible/cloudtrail_logs_s3_bucket_is_not_publicly_accessible.metadata.json +++ b/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_is_not_publicly_accessible/cloudtrail_logs_s3_bucket_is_not_publicly_accessible.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "cloudtrail_logs_s3_bucket_is_not_publicly_accessible", - "CheckTitle": "Ensure the S3 bucket CloudTrail logs is not publicly accessible", - "CheckType": ["Software and Configuration Checks", 
"Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], - "ServiceName": "cloudtrail", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "critical", - "ResourceType": "AwsCloudTrailTrail", - "Description": "Ensure the S3 bucket CloudTrail logs to is not publicly accessible", - "Risk": "Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected accounts use or configuration.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "https://docs.bridgecrew.io/docs/logging_3#aws-console", - "Terraform": "" - }, - "Recommendation": { - "Text": "Analyze Bucket policy to validate appropriate permissions. Ensure the AllUsers principal is not granted privileges. Ensure the AuthenticatedUsers principal is not granted privileges.", - "Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html" - } + "Provider": "aws", + "CheckID": "cloudtrail_logs_s3_bucket_is_not_publicly_accessible", + "CheckTitle": "Ensure the S3 bucket CloudTrail logs is not publicly accessible", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "cloudtrail", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "critical", + "ResourceType": "AwsCloudTrailTrail", + "Description": "Ensure the S3 bucket CloudTrail logs to is not publicly accessible", + "Risk": "Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected accounts use or configuration.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://docs.bridgecrew.io/docs/logging_3#aws-console", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Analyze Bucket policy to validate appropriate permissions. Ensure the AllUsers principal is not granted privileges. 
Ensure the AuthenticatedUsers principal is not granted privileges.", + "Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_is_not_publicly_accessible/cloudtrail_logs_s3_bucket_is_not_publicly_accessible.py b/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_is_not_publicly_accessible/cloudtrail_logs_s3_bucket_is_not_publicly_accessible.py index 84e4aaec..0ed332af 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_is_not_publicly_accessible/cloudtrail_logs_s3_bucket_is_not_publicly_accessible.py +++ b/providers/aws/services/cloudtrail/cloudtrail_logs_s3_bucket_is_not_publicly_accessible/cloudtrail_logs_s3_bucket_is_not_publicly_accessible.py @@ -9,7 +9,7 @@ class cloudtrail_logs_s3_bucket_is_not_publicly_accessible(Check): for trail in cloudtrail_client.trails: if trail.name: trail_bucket = trail.s3_bucket - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = trail.region report.resource_id = trail.name report.resource_arn = trail.arn diff --git a/providers/aws/services/cloudtrail/cloudtrail_multi_region_enabled/cloudtrail_multi_region_enabled.metadata.json b/providers/aws/services/cloudtrail/cloudtrail_multi_region_enabled/cloudtrail_multi_region_enabled.metadata.json index b1fbe6cd..f83f0807 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_multi_region_enabled/cloudtrail_multi_region_enabled.metadata.json +++ b/providers/aws/services/cloudtrail/cloudtrail_multi_region_enabled/cloudtrail_multi_region_enabled.metadata.json @@ -1,46 +1,38 @@ { - "Provider": "aws", - "CheckID": "cloudtrail_multi_region_enabled", - "CheckTitle": "Ensure CloudTrail is enabled in all regions", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], - "ServiceName": "cloudtrail", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "high", - "ResourceType": "AwsCloudTrailTrail", - "Description": "Ensure CloudTrail is enabled in all regions", - "Risk": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. 
The recorded information includes the identity of the API caller; the time of the API call; the source IP address of the API caller; the request parameters; and the response elements returned by the AWS service.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws cloudtrail create-trail --name --bucket-name --is-multi-region-trail aws cloudtrail update-trail --name --is-multi-region-trail ", - "NativeIaC": "https://docs.bridgecrew.io/docs/logging_1#cloudformation", - "Other": "https://docs.bridgecrew.io/docs/logging_1#aws-console", - "Terraform": "https://docs.bridgecrew.io/docs/logging_1#terraform" - }, - "Recommendation": { - "Text": "Ensure Logging is set to ON on all regions (even if they are not being used at the moment.", - "Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrailconcepts.html#cloudtrail-concepts-management-events" - } + "Provider": "aws", + "CheckID": "cloudtrail_multi_region_enabled", + "CheckTitle": "Ensure CloudTrail is enabled in all regions", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "cloudtrail", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "high", + "ResourceType": "AwsCloudTrailTrail", + "Description": "Ensure CloudTrail is enabled in all regions", + "Risk": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller; the time of the API call; the source IP address of the API caller; the request parameters; and the response elements returned by the AWS service.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws cloudtrail create-trail --name --bucket-name --is-multi-region-trail aws cloudtrail update-trail --name --is-multi-region-trail ", + "NativeIaC": "https://docs.bridgecrew.io/docs/logging_1#cloudformation", + "Other": "https://docs.bridgecrew.io/docs/logging_1#aws-console", + "Terraform": "https://docs.bridgecrew.io/docs/logging_1#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "2.1" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] - } + "Recommendation": { + "Text": "Ensure Logging is set to ON on all regions (even if they are not being used at the moment.", + "Url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrailconcepts.html#cloudtrail-concepts-management-events" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/cloudtrail/cloudtrail_multi_region_enabled/cloudtrail_multi_region_enabled.py b/providers/aws/services/cloudtrail/cloudtrail_multi_region_enabled/cloudtrail_multi_region_enabled.py index 9104ee01..6c237faa 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_multi_region_enabled/cloudtrail_multi_region_enabled.py +++ b/providers/aws/services/cloudtrail/cloudtrail_multi_region_enabled/cloudtrail_multi_region_enabled.py @@ -7,7 +7,7 @@ class cloudtrail_multi_region_enabled(Check): findings = [] actual_region = None for trail in cloudtrail_client.trails: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = trail.region if trail.name: # 
Check if there are trails in region # Check if region has changed and add report of previous region diff --git a/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_read_enabled/cloudtrail_s3_dataevents_read_enabled.metadata.json b/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_read_enabled/cloudtrail_s3_dataevents_read_enabled.metadata.json index 292588d2..9e4cf6c5 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_read_enabled/cloudtrail_s3_dataevents_read_enabled.metadata.json +++ b/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_read_enabled/cloudtrail_s3_dataevents_read_enabled.metadata.json @@ -32,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_read_enabled/cloudtrail_s3_dataevents_read_enabled.py b/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_read_enabled/cloudtrail_s3_dataevents_read_enabled.py index 7cf62aac..330530e7 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_read_enabled/cloudtrail_s3_dataevents_read_enabled.py +++ b/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_read_enabled/cloudtrail_s3_dataevents_read_enabled.py @@ -5,7 +5,7 @@ from providers.aws.services.cloudtrail.cloudtrail_client import cloudtrail_clien class cloudtrail_s3_dataevents_read_enabled(Check): def execute(self): findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cloudtrail_client.region report.resource_id = "No trails" report.resource_arn = "No trails" diff --git a/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_write_enabled/cloudtrail_s3_dataevents_write_enabled.metadata.json b/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_write_enabled/cloudtrail_s3_dataevents_write_enabled.metadata.json index 5f13d155..02c3ec87 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_write_enabled/cloudtrail_s3_dataevents_write_enabled.metadata.json +++ b/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_write_enabled/cloudtrail_s3_dataevents_write_enabled.metadata.json @@ -32,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_write_enabled/cloudtrail_s3_dataevents_write_enabled.py b/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_write_enabled/cloudtrail_s3_dataevents_write_enabled.py index 752054df..25cfeb72 100644 --- a/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_write_enabled/cloudtrail_s3_dataevents_write_enabled.py +++ b/providers/aws/services/cloudtrail/cloudtrail_s3_dataevents_write_enabled/cloudtrail_s3_dataevents_write_enabled.py @@ -5,7 +5,7 @@ from providers.aws.services.cloudtrail.cloudtrail_client import cloudtrail_clien class cloudtrail_s3_dataevents_write_enabled(Check): def execute(self): findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cloudtrail_client.region report.resource_id = "No trails" report.resource_arn = "No trails" diff --git a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.metadata.json index 3d0e33fe..c14c9402 100644 --- 
a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_changes_to_network_acls_alarm_configured", "CheckTitle": "Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL).", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.py b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.py index a5c1d22b..44615ec2 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.py +++ b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.py @@ -10,7 +10,7 @@ class cloudwatch_changes_to_network_acls_alarm_configured(Check): def execute(self): pattern = r"\$\.eventName\s*=\s*CreateNetworkAcl.+\$\.eventName\s*=\s*CreateNetworkAclEntry.+\$\.eventName\s*=\s*DeleteNetworkAcl.+\$\.eventName\s*=\s*DeleteNetworkAclEntry.+\$\.eventName\s*=\s*ReplaceNetworkAclEntry.+\$\.eventName\s*=\s*ReplaceNetworkAclAssociation" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
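Every .py hunk in this part of the patch makes the same one-line change: Check_Report(self.metadata) becomes Check_Report(self.metadata()), which only works if metadata is now exposed as a callable on the Check base class. Below is a minimal, self-contained sketch of that calling convention; Check_Report's fields and the stub metadata() body are assumptions for illustration, not the project's real implementation.

```python
# Sketch only: stand-ins for the Check / Check_Report classes used in the hunks above.
class Check_Report:
    def __init__(self, metadata):
        # Keeps the check metadata plus the per-resource fields that the
        # execute() bodies in this patch fill in.
        self.check_metadata = metadata
        self.region = ""
        self.resource_id = ""
        self.resource_arn = ""
        self.status = ""
        self.status_extended = ""


class Check:
    def metadata(self):
        # Stand-in for reading the <check>.metadata.json file that sits next to
        # each check module; the real loading logic is not shown in this diff.
        return {"CheckID": "example_check", "Severity": "low"}


class example_check(Check):
    def execute(self):
        report = Check_Report(self.metadata())  # new form: pass the parsed metadata
        # report = Check_Report(self.metadata)  # old form: would pass the bound method itself
        report.status = "PASS"
        report.status_extended = "Example resource is compliant"
        return [report]


if __name__ == "__main__":
    print(example_check().execute()[0].check_metadata["CheckID"])
```

The added parentheses matter because a report built from the bound method object would carry no usable metadata; calling the method hands the report the metadata value it actually needs.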
diff --git a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.metadata.json index 7fe327cf..24fa7e0b 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_changes_to_network_gateways_alarm_configured", "CheckTitle": "Ensure a log metric filter and alarm exist for changes to network gateways.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.py b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.py index 1275814f..657b232c 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.py +++ b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.py @@ -10,7 +10,7 @@ class cloudwatch_changes_to_network_gateways_alarm_configured(Check): def execute(self): pattern = r"\$\.eventName\s*=\s*CreateCustomerGateway.+\$\.eventName\s*=\s*DeleteCustomerGateway.+\$\.eventName\s*=\s*AttachInternetGateway.+\$\.eventName\s*=\s*CreateInternetGateway.+\$\.eventName\s*=\s*DeleteInternetGateway.+\$\.eventName\s*=\s*DetachInternetGateway" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.metadata.json index 9e7e7f12..23e2c010 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_changes_to_network_route_tables_alarm_configured", "CheckTitle": "Ensure a log metric filter and alarm exist for route table changes.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.py b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.py index deda8725..cbd6a48d 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.py +++ b/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.py @@ -10,7 +10,7 @@ class cloudwatch_changes_to_network_route_tables_alarm_configured(Check): def execute(self): pattern = r"\$\.eventName\s*=\s*CreateRoute.+\$\.eventName\s*=\s*CreateRouteTable.+\$\.eventName\s*=\s*ReplaceRoute.+\$\.eventName\s*=\s*ReplaceRouteTableAssociation.+\$\.eventName\s*=\s*DeleteRouteTable.+\$\.eventName\s*=\s*DeleteRoute.+\$\.eventName\s*=\s*DisassociateRouteTable" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.metadata.json index 2ea771fa..036cc134 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_changes_to_vpcs_alarm_configured", "CheckTitle": "Ensure a log metric filter and alarm exist for VPC changes.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.py b/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.py index 4efdda55..8977d952 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.py +++ b/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.py @@ -10,7 +10,7 @@ class cloudwatch_changes_to_vpcs_alarm_configured(Check): def execute(self): pattern = r"\$\.eventName\s*=\s*CreateVpc.+\$\.eventName\s*=\s*DeleteVpc.+\$\.eventName\s*=\s*ModifyVpcAttribute.+\$\.eventName\s*=\s*AcceptVpcPeeringConnection.+\$\.eventName\s*=\s*CreateVpcPeeringConnection.+\$\.eventName\s*=\s*DeleteVpcPeeringConnection.+\$\.eventName\s*=\s*RejectVpcPeeringConnection.+\$\.eventName\s*=\s*AttachClassicLinkVpc.+\$\.eventName\s*=\s*DetachClassicLinkVpc.+\$\.eventName\s*=\s*DisableVpcClassicLink.+\$\.eventName\s*=\s*EnableVpcClassicLink" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
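The cloudwatch_changes_to_* checks above each declare a CloudTrail event-name regex, then start from a default FAIL report whose status_extended reads "No CloudWatch log groups found with metric filters or alarms associated." The remainder of execute() is outside the visible context, so the following is only a sketch of the assumed flow, in which a log group's metric filter pattern flips the result to PASS; evaluate_metric_filter and the example filter string are hypothetical names and data.

```python
import re


def evaluate_metric_filter(pattern: str, filter_patterns: list[str]) -> tuple[str, str]:
    """Return (status, status_extended) for one alarm check, given the metric
    filter patterns discovered in CloudWatch log groups (assumed flow)."""
    for filter_pattern in filter_patterns:
        if re.search(pattern, filter_pattern):
            return "PASS", f"CloudWatch log group has a metric filter matching {filter_pattern!r}"
    return (
        "FAIL",
        "No CloudWatch log groups found with metric filters or alarms associated.",
    )


# Hypothetical filter string checked against a shortened variant of the
# NACL-changes regex from the hunks above:
nacl_pattern = r"\$\.eventName\s*=\s*CreateNetworkAcl.+\$\.eventName\s*=\s*DeleteNetworkAcl"
print(evaluate_metric_filter(
    nacl_pattern,
    ["{ ($.eventName = CreateNetworkAcl) || ($.eventName = DeleteNetworkAcl) }"],
))
```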
diff --git a/providers/aws/services/cloudwatch/cloudwatch_cross_account_sharing_disabled/cloudwatch_cross_account_sharing_disabled.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_cross_account_sharing_disabled/cloudwatch_cross_account_sharing_disabled.metadata.json index ef547020..59ed5495 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_cross_account_sharing_disabled/cloudwatch_cross_account_sharing_disabled.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_cross_account_sharing_disabled/cloudwatch_cross_account_sharing_disabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_cross_account_sharing_disabled", "CheckTitle": "Check if CloudWatch has allowed cross-account sharing.", - "CheckType": ["Logging and Monitoring"], + "CheckType": [ + "Logging and Monitoring" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_cross_account_sharing_disabled/cloudwatch_cross_account_sharing_disabled.py b/providers/aws/services/cloudwatch/cloudwatch_cross_account_sharing_disabled/cloudwatch_cross_account_sharing_disabled.py index 07c8a101..674f814b 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_cross_account_sharing_disabled/cloudwatch_cross_account_sharing_disabled.py +++ b/providers/aws/services/cloudwatch/cloudwatch_cross_account_sharing_disabled/cloudwatch_cross_account_sharing_disabled.py @@ -5,7 +5,7 @@ from providers.aws.services.iam.iam_client import iam_client class cloudwatch_cross_account_sharing_disabled(Check): def execute(self): findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "PASS" report.status_extended = "CloudWatch doesn't allows cross-account sharing" report.resource_id = "CloudWatch-CrossAccountSharingRole" diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_group_kms_encryption_enabled/cloudwatch_log_group_kms_encryption_enabled.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_group_kms_encryption_enabled/cloudwatch_log_group_kms_encryption_enabled.metadata.json index dd72fca7..7f3ca261 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_group_kms_encryption_enabled/cloudwatch_log_group_kms_encryption_enabled.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_group_kms_encryption_enabled/cloudwatch_log_group_kms_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_group_kms_encryption_enabled", "CheckTitle": "Check if CloudWatch log groups are protected by AWS KMS.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "cloudwatch", "SubServiceName": "logs", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_group_kms_encryption_enabled/cloudwatch_log_group_kms_encryption_enabled.py b/providers/aws/services/cloudwatch/cloudwatch_log_group_kms_encryption_enabled/cloudwatch_log_group_kms_encryption_enabled.py index 8ed8d910..32376eff 100644 --- 
a/providers/aws/services/cloudwatch/cloudwatch_log_group_kms_encryption_enabled/cloudwatch_log_group_kms_encryption_enabled.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_group_kms_encryption_enabled/cloudwatch_log_group_kms_encryption_enabled.py @@ -6,7 +6,7 @@ class cloudwatch_log_group_kms_encryption_enabled(Check): def execute(self): findings = [] for log_group in logs_client.log_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = log_group.region report.resource_id = log_group.name report.resource_arn = log_group.arn diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.metadata.json index b73df627..3600c497 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_group_retention_policy_specific_days_enabled", "CheckTitle": "Check if CloudWatch Log Groups have a retention policy of specific days.", - "CheckType": ["Data Retention"], + "CheckType": [ + "Data Retention" + ], "ServiceName": "cloudwatch", "SubServiceName": "logs", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py b/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py index 482c7b60..3a4a12f9 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py @@ -8,7 +8,7 @@ class cloudwatch_log_group_retention_policy_specific_days_enabled(Check): findings = [] specific_retention_days = get_config_var("log_group_retention_days") for log_group in logs_client.log_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = log_group.region report.resource_id = log_group.name report.resource_arn = log_group.arn diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.metadata.json index f6eb4936..830a1936 100644 --- 
a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled", "CheckTitle": "Ensure a log metric filter and alarm exist for AWS Config configuration changes.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.py index e93f5281..5dde517c 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.py @@ -12,7 +12,7 @@ class cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_change def execute(self): pattern = r"\$\.eventSource\s*=\s*config.amazonaws.com.+\$\.eventName\s*=\s*StopConfigurationRecorder.+\$\.eventName\s*=\s*DeleteDeliveryChannel.+\$\.eventName\s*=\s*PutDeliveryChannel.+\$\.eventName\s*=\s*PutConfigurationRecorder" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
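A few hunks back, the log-group retention check reads its threshold through get_config_var("log_group_retention_days") rather than hard-coding a value, and both log-group checks iterate logs_client.log_groups. The comparison itself is not visible in the diff, so the sketch below only assumes a PASS when retention meets or exceeds the configured number of days; get_config_var, the LogGroup shape, and the 365-day default are stand-ins, not the real code.

```python
from dataclasses import dataclass
from typing import Optional

_CONFIG = {"log_group_retention_days": 365}  # assumed default, for illustration only


def get_config_var(name: str):
    # Stand-in for the config helper referenced in the retention hunk.
    return _CONFIG.get(name)


@dataclass
class LogGroup:
    name: str
    region: str
    arn: str
    retention_days: Optional[int]  # None would mean "never expire"


def check_retention(log_group: LogGroup) -> str:
    required_days = get_config_var("log_group_retention_days")
    if log_group.retention_days is not None and log_group.retention_days >= required_days:
        return "PASS"
    return "FAIL"


print(check_retention(
    LogGroup("prod-app", "eu-west-1",
             "arn:aws:logs:eu-west-1:123456789012:log-group:prod-app", 400)
))  # -> PASS under the assumed 365-day threshold
```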
diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.metadata.json index a9bf16d8..5fcb89b4 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled", "CheckTitle": "Ensure a log metric filter and alarm exist for CloudTrail configuration changes.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.py index 13baaebb..0726648f 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.py @@ -12,7 +12,7 @@ class cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_change def execute(self): pattern = r"\$\.eventName\s*=\s*CreateTrail.+\$\.eventName\s*=\s*UpdateTrail.+\$\.eventName\s*=\s*DeleteTrail.+\$\.eventName\s*=\s*StartLogging.+\$\.eventName\s*=\s*StopLogging" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.metadata.json index 47583656..43862b74 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_authentication_failures", "CheckTitle": "Ensure a log metric filter and alarm exist for AWS Management Console authentication failures.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.py index b68e6e33..48626472 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.py @@ -10,7 +10,7 @@ class cloudwatch_log_metric_filter_authentication_failures(Check): def execute(self): pattern = r"\$\.eventName\s*=\s*ConsoleLogin.+\$\.errorMessage\s*=\s*Failed authentication" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.metadata.json index 6ebd48f4..a8ff6146 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_aws_organizations_changes", "CheckTitle": "Ensure a log metric filter and alarm exist for AWS Organizations changes.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.py index fabc6ead..c12f8659 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.py @@ -10,7 +10,7 @@ class cloudwatch_log_metric_filter_aws_organizations_changes(Check): def execute(self): pattern = r"\$\.eventSource\s*=\s*organizations\.amazonaws\.com.+\$\.eventName\s*=\s*AcceptHandshake.+\$\.eventName\s*=\s*AttachPolicy.+\$\.eventName\s*=\s*CancelHandshake.+\$\.eventName\s*=\s*CreateAccount.+\$\.eventName\s*=\s*CreateOrganization.+\$\.eventName\s*=\s*CreateOrganizationalUnit.+\$\.eventName\s*=\s*CreatePolicy.+\$\.eventName\s*=\s*DeclineHandshake.+\$\.eventName\s*=\s*DeleteOrganization.+\$\.eventName\s*=\s*DeleteOrganizationalUnit.+\$\.eventName\s*=\s*DeletePolicy.+\$\.eventName\s*=\s*EnableAllFeatures.+\$\.eventName\s*=\s*EnablePolicyType.+\$\.eventName\s*=\s*InviteAccountToOrganization.+\$\.eventName\s*=\s*LeaveOrganization.+\$\.eventName\s*=\s*DetachPolicy.+\$\.eventName\s*=\s*DisablePolicyType.+\$\.eventName\s*=\s*MoveAccount.+\$\.eventName\s*=\s*RemoveAccountFromOrganization.+\$\.eventName\s*=\s*UpdateOrganizationalUnit.+\$\.eventName\s*=\s*UpdatePolicy" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.metadata.json index 685c0c46..2143abb5 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk", "CheckTitle": "Ensure a log metric filter and alarm exist for disabling or scheduled deletion of customer created KMS CMKs.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.py index e61d40a9..99927eff 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.py @@ -10,7 +10,7 @@ class cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk(Chec def execute(self): pattern = r"\$\.eventSource\s*=\s*kms.amazonaws.com.+\$\.eventName\s*=\s*DisableKey.+\$\.eventName\s*=\s*ScheduleKeyDeletion" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.metadata.json index d2ea9bac..4b3c4e68 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_for_s3_bucket_policy_changes", "CheckTitle": "Ensure a log metric filter and alarm exist for S3 bucket policy changes.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.py index 5230a685..633674f5 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.py @@ -10,7 +10,7 @@ class cloudwatch_log_metric_filter_for_s3_bucket_policy_changes(Check): def execute(self): pattern = r"\$\.eventSource\s*=\s*s3.amazonaws.com.+\$\.eventName\s*=\s*PutBucketAcl.+\$\.eventName\s*=\s*PutBucketPolicy.+\$\.eventName\s*=\s*PutBucketCors.+\$\.eventName\s*=\s*PutBucketLifecycle.+\$\.eventName\s*=\s*PutBucketReplication.+\$\.eventName\s*=\s*DeleteBucketPolicy.+\$\.eventName\s*=\s*DeleteBucketCors.+\$\.eventName\s*=\s*DeleteBucketLifecycle.+\$\.eventName\s*=\s*DeleteBucketReplication" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.metadata.json index fab0de2b..8b6c37ca 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_policy_changes", "CheckTitle": "Ensure a log metric filter and alarm exist for IAM policy changes.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.py index c1afd3eb..b14fc767 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.py @@ -10,7 +10,7 @@ class cloudwatch_log_metric_filter_policy_changes(Check): def execute(self): pattern = r"\$\.eventName\s*=\s*DeleteGroupPolicy.+\$\.eventName\s*=\s*DeleteRolePolicy.+\$\.eventName\s*=\s*DeleteUserPolicy.+\$\.eventName\s*=\s*PutGroupPolicy.+\$\.eventName\s*=\s*PutRolePolicy.+\$\.eventName\s*=\s*PutUserPolicy.+\$\.eventName\s*=\s*CreatePolicy.+\$\.eventName\s*=\s*DeletePolicy.+\$\.eventName\s*=\s*CreatePolicyVersion.+\$\.eventName\s*=\s*DeletePolicyVersion.+\$\.eventName\s*=\s*AttachRolePolicy.+\$\.eventName\s*=\s*DetachRolePolicy.+\$\.eventName\s*=\s*AttachUserPolicy.+\$\.eventName\s*=\s*DetachUserPolicy.+\$\.eventName\s*=\s*AttachGroupPolicy.+\$\.eventName\s*=\s*DetachGroupPolicy" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.metadata.json index f2e35960..b3b2234a 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_root_usage", "CheckTitle": "Ensure a log metric filter and alarm exist for usage of root account.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.py index e49456d2..96a94525 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.py @@ -10,7 +10,7 @@ class cloudwatch_log_metric_filter_root_usage(Check): def execute(self): pattern = r"\$\.userIdentity\.type\s*=\s*Root.+\$\.userIdentity\.invokedBy NOT EXISTS.+\$\.eventType\s*!=\s*AwsServiceEvent" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.metadata.json index f24e6621..797cef63 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_security_group_changes", "CheckTitle": "Ensure a log metric filter and alarm exist for security group changes.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.py index df18e791..23f1dc8b 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.py @@ -10,7 +10,7 @@ class cloudwatch_log_metric_filter_security_group_changes(Check): def execute(self): pattern = r"\$\.eventName\s*=\s*AuthorizeSecurityGroupIngress.+\$\.eventName\s*=\s*AuthorizeSecurityGroupEgress.+\$\.eventName\s*=\s*RevokeSecurityGroupIngress.+\$\.eventName\s*=\s*RevokeSecurityGroupEgress.+\$\.eventName\s*=\s*CreateSecurityGroup.+\$\.eventName\s*=\s*DeleteSecurityGroup" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.metadata.json index eee7b54b..b196791c 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_sign_in_without_mfa", "CheckTitle": "Ensure a log metric filter and alarm exist for Management Console sign-in without MFA.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.py index 70e8ec8d..aead862b 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.py @@ -10,7 +10,7 @@ class cloudwatch_log_metric_filter_sign_in_without_mfa(Check): def execute(self): pattern = r"\$\.eventName\s*=\s*ConsoleLogin.+\$\.additionalEventData\.MFAUsed\s*!=\s*Yes" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.metadata.json b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.metadata.json index 73f1cbe9..fc12812b 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.metadata.json +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "cloudwatch_log_metric_filter_unauthorized_api_calls", "CheckTitle": "Ensure a log metric filter and alarm exist for unauthorized API calls.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "cloudwatch", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:cloudwatch:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.py b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.py index ba9ef9ac..be788b9b 100644 --- a/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.py +++ b/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.py @@ -10,7 +10,7 @@ class cloudwatch_log_metric_filter_unauthorized_api_calls(Check): def execute(self): pattern = r"\$\.errorCode\s*=\s*\*UnauthorizedOperation.+\$\.errorCode\s*=\s*AccessDenied\*" findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = ( "No CloudWatch log groups found with metric filters or alarms associated." 
diff --git a/providers/aws/services/codeartifact/codeartifact_packages_external_public_publishing_disabled/codeartifact_packages_external_public_publishing_disabled.metadata.json b/providers/aws/services/codeartifact/codeartifact_packages_external_public_publishing_disabled/codeartifact_packages_external_public_publishing_disabled.metadata.json index 19d1387d..90938544 100644 --- a/providers/aws/services/codeartifact/codeartifact_packages_external_public_publishing_disabled/codeartifact_packages_external_public_publishing_disabled.metadata.json +++ b/providers/aws/services/codeartifact/codeartifact_packages_external_public_publishing_disabled/codeartifact_packages_external_public_publishing_disabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/codeartifact/codeartifact_packages_external_public_publishing_disabled/codeartifact_packages_external_public_publishing_disabled.py b/providers/aws/services/codeartifact/codeartifact_packages_external_public_publishing_disabled/codeartifact_packages_external_public_publishing_disabled.py index ec089210..465edff7 100644 --- a/providers/aws/services/codeartifact/codeartifact_packages_external_public_publishing_disabled/codeartifact_packages_external_public_publishing_disabled.py +++ b/providers/aws/services/codeartifact/codeartifact_packages_external_public_publishing_disabled/codeartifact_packages_external_public_publishing_disabled.py @@ -11,7 +11,7 @@ class codeartifact_packages_external_public_publishing_disabled(Check): findings = [] for repository in codeartifact_client.repositories.values(): for package in repository.packages: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = repository.region report.resource_id = package.name diff --git a/providers/aws/services/codebuild/codebuild_project_older_90_days/codebuild_project_older_90_days.metadata.json b/providers/aws/services/codebuild/codebuild_project_older_90_days/codebuild_project_older_90_days.metadata.json index 21645158..726bd06a 100644 --- a/providers/aws/services/codebuild/codebuild_project_older_90_days/codebuild_project_older_90_days.metadata.json +++ b/providers/aws/services/codebuild/codebuild_project_older_90_days/codebuild_project_older_90_days.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "codebuild_project_older_90_days", - "CheckTitle": "Ensure CodeBuild Project has been invoked in the last 90 days", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards"], - "ServiceName": "codebuild", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsCodeBuildProject", - "Description": "Ensure CodeBuild Project has been invoked in the last 90 days", - "Risk": "Older CodeBuild projects can be checked to see if they are currently in use.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Check if CodeBuild project are really in use and remove the stale ones", - "Url": "https://docs.aws.amazon.com/codebuild/latest/userguide/delete-project.html" - } + "Provider": "aws", + "CheckID": "codebuild_project_older_90_days", + "CheckTitle": "Ensure CodeBuild Project has been invoked in the last 90 days", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards" + 
], + "ServiceName": "codebuild", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsCodeBuildProject", + "Description": "Ensure CodeBuild Project has been invoked in the last 90 days", + "Risk": "Older CodeBuild projects can be checked to see if they are currently in use.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Check if CodeBuild project are really in use and remove the stale ones", + "Url": "https://docs.aws.amazon.com/codebuild/latest/userguide/delete-project.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/codebuild/codebuild_project_older_90_days/codebuild_project_older_90_days.py b/providers/aws/services/codebuild/codebuild_project_older_90_days/codebuild_project_older_90_days.py index 70ae2d11..2eb227c8 100644 --- a/providers/aws/services/codebuild/codebuild_project_older_90_days/codebuild_project_older_90_days.py +++ b/providers/aws/services/codebuild/codebuild_project_older_90_days/codebuild_project_older_90_days.py @@ -8,7 +8,7 @@ class codebuild_project_older_90_days(Check): def execute(self): findings = [] for project in codebuild_client.projects: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = project.region report.resource_id = project.name report.resource_arn = "" diff --git a/providers/aws/services/codebuild/codebuild_project_user_controlled_buildspec/codebuild_project_user_controlled_buildspec.metadata.json b/providers/aws/services/codebuild/codebuild_project_user_controlled_buildspec/codebuild_project_user_controlled_buildspec.metadata.json index 7131a5e5..fe37ffcb 100644 --- a/providers/aws/services/codebuild/codebuild_project_user_controlled_buildspec/codebuild_project_user_controlled_buildspec.metadata.json +++ b/providers/aws/services/codebuild/codebuild_project_user_controlled_buildspec/codebuild_project_user_controlled_buildspec.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "codebuild_project_user_controlled_buildspec", - "CheckTitle": "Ensure CodeBuild Project uses a controlled buildspec", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards"], - "ServiceName": "codebuild", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsCodeBuildProject", - "Description": "Ensure CodeBuild Project uses a controlled buildspec", - "Risk": "The CodeBuild projects with user controlled buildspec", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Use buildspec.yml from a trusted source which user cant interfere with", - "Url": "https://docs.aws.amazon.com/codebuild/latest/userguide/security.html" - } + "Provider": "aws", + "CheckID": "codebuild_project_user_controlled_buildspec", + "CheckTitle": "Ensure CodeBuild Project uses a controlled buildspec", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards" + ], + "ServiceName": "codebuild", + 
"SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsCodeBuildProject", + "Description": "Ensure CodeBuild Project uses a controlled buildspec", + "Risk": "The CodeBuild projects with user controlled buildspec", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Use buildspec.yml from a trusted source which user cant interfere with", + "Url": "https://docs.aws.amazon.com/codebuild/latest/userguide/security.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/codebuild/codebuild_project_user_controlled_buildspec/codebuild_project_user_controlled_buildspec.py b/providers/aws/services/codebuild/codebuild_project_user_controlled_buildspec/codebuild_project_user_controlled_buildspec.py index e32f8732..a254bef9 100644 --- a/providers/aws/services/codebuild/codebuild_project_user_controlled_buildspec/codebuild_project_user_controlled_buildspec.py +++ b/providers/aws/services/codebuild/codebuild_project_user_controlled_buildspec/codebuild_project_user_controlled_buildspec.py @@ -8,7 +8,7 @@ class codebuild_project_user_controlled_buildspec(Check): def execute(self): findings = [] for project in codebuild_client.projects: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = project.region report.resource_id = project.name report.resource_arn = "" diff --git a/providers/aws/services/config/config_recorder_all_regions_enabled/config_recorder_all_regions_enabled.metadata.json b/providers/aws/services/config/config_recorder_all_regions_enabled/config_recorder_all_regions_enabled.metadata.json index 0cbcba43..707a11f9 100644 --- a/providers/aws/services/config/config_recorder_all_regions_enabled/config_recorder_all_regions_enabled.metadata.json +++ b/providers/aws/services/config/config_recorder_all_regions_enabled/config_recorder_all_regions_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "config_recorder_all_regions_enabled", "CheckTitle": "Ensure AWS Config is enabled in all regions.", - "CheckType": ["Logging and Monitoring"], + "CheckType": [ + "Logging and Monitoring" + ], "ServiceName": "config", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:access-recorder:region:account-id:recorder/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/config/config_recorder_all_regions_enabled/config_recorder_all_regions_enabled.py b/providers/aws/services/config/config_recorder_all_regions_enabled/config_recorder_all_regions_enabled.py index 82ef2922..d5590f33 100644 --- a/providers/aws/services/config/config_recorder_all_regions_enabled/config_recorder_all_regions_enabled.py +++ b/providers/aws/services/config/config_recorder_all_regions_enabled/config_recorder_all_regions_enabled.py @@ -6,9 +6,9 @@ class config_recorder_all_regions_enabled(Check): def execute(self): findings = [] for recorder in config_client.recorders: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = recorder.region - report.resource_id = 
recorder.name + report.resource_id = "" if not recorder.name else recorder.name # Check if Config is enabled in region if not recorder.name: report.status = "FAIL" diff --git a/providers/aws/services/directoryservice/directoryservice_directory_log_forwarding_enabled/directoryservice_directory_log_forwarding_enabled.metadata.json b/providers/aws/services/directoryservice/directoryservice_directory_log_forwarding_enabled/directoryservice_directory_log_forwarding_enabled.metadata.json index 448d34c0..f20be74d 100644 --- a/providers/aws/services/directoryservice/directoryservice_directory_log_forwarding_enabled/directoryservice_directory_log_forwarding_enabled.metadata.json +++ b/providers/aws/services/directoryservice/directoryservice_directory_log_forwarding_enabled/directoryservice_directory_log_forwarding_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/directoryservice/directoryservice_directory_log_forwarding_enabled/directoryservice_directory_log_forwarding_enabled.py b/providers/aws/services/directoryservice/directoryservice_directory_log_forwarding_enabled/directoryservice_directory_log_forwarding_enabled.py index 3525abea..5cd281c3 100644 --- a/providers/aws/services/directoryservice/directoryservice_directory_log_forwarding_enabled/directoryservice_directory_log_forwarding_enabled.py +++ b/providers/aws/services/directoryservice/directoryservice_directory_log_forwarding_enabled/directoryservice_directory_log_forwarding_enabled.py @@ -8,7 +8,7 @@ class directoryservice_directory_log_forwarding_enabled(Check): def execute(self): findings = [] for directory in directoryservice_client.directories.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = directory.region report.resource_id = directory.id if directory.log_subscriptions: diff --git a/providers/aws/services/directoryservice/directoryservice_directory_monitor_notifications/directoryservice_directory_monitor_notifications.metadata.json b/providers/aws/services/directoryservice/directoryservice_directory_monitor_notifications/directoryservice_directory_monitor_notifications.metadata.json index 77f81aa4..a394a2f0 100644 --- a/providers/aws/services/directoryservice/directoryservice_directory_monitor_notifications/directoryservice_directory_monitor_notifications.metadata.json +++ b/providers/aws/services/directoryservice/directoryservice_directory_monitor_notifications/directoryservice_directory_monitor_notifications.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/directoryservice/directoryservice_directory_monitor_notifications/directoryservice_directory_monitor_notifications.py b/providers/aws/services/directoryservice/directoryservice_directory_monitor_notifications/directoryservice_directory_monitor_notifications.py index 7b46f5c9..83359add 100644 --- a/providers/aws/services/directoryservice/directoryservice_directory_monitor_notifications/directoryservice_directory_monitor_notifications.py +++ b/providers/aws/services/directoryservice/directoryservice_directory_monitor_notifications/directoryservice_directory_monitor_notifications.py @@ -8,7 +8,7 @@ class directoryservice_directory_monitor_notifications(Check): def execute(self): findings = [] for directory in directoryservice_client.directories.values(): - report = Check_Report(self.metadata) + report = 
Check_Report(self.metadata()) report.region = directory.region report.resource_id = directory.id if directory.event_topics: diff --git a/providers/aws/services/directoryservice/directoryservice_directory_snapshots_limit/directoryservice_directory_snapshots_limit.metadata.json b/providers/aws/services/directoryservice/directoryservice_directory_snapshots_limit/directoryservice_directory_snapshots_limit.metadata.json index 85efa916..ad0e11d3 100644 --- a/providers/aws/services/directoryservice/directoryservice_directory_snapshots_limit/directoryservice_directory_snapshots_limit.metadata.json +++ b/providers/aws/services/directoryservice/directoryservice_directory_snapshots_limit/directoryservice_directory_snapshots_limit.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/directoryservice/directoryservice_directory_snapshots_limit/directoryservice_directory_snapshots_limit.py b/providers/aws/services/directoryservice/directoryservice_directory_snapshots_limit/directoryservice_directory_snapshots_limit.py index 3b636aa7..cf747cb1 100644 --- a/providers/aws/services/directoryservice/directoryservice_directory_snapshots_limit/directoryservice_directory_snapshots_limit.py +++ b/providers/aws/services/directoryservice/directoryservice_directory_snapshots_limit/directoryservice_directory_snapshots_limit.py @@ -11,7 +11,7 @@ class directoryservice_directory_snapshots_limit(Check): def execute(self): findings = [] for directory in directoryservice_client.directories.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = directory.region report.resource_id = directory.id if directory.snapshots_limits: diff --git a/providers/aws/services/directoryservice/directoryservice_ldap_certificate_expiration/directoryservice_ldap_certificate_expiration.metadata.json b/providers/aws/services/directoryservice/directoryservice_ldap_certificate_expiration/directoryservice_ldap_certificate_expiration.metadata.json index 3cddfd00..527dac88 100644 --- a/providers/aws/services/directoryservice/directoryservice_ldap_certificate_expiration/directoryservice_ldap_certificate_expiration.metadata.json +++ b/providers/aws/services/directoryservice/directoryservice_ldap_certificate_expiration/directoryservice_ldap_certificate_expiration.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/directoryservice/directoryservice_ldap_certificate_expiration/directoryservice_ldap_certificate_expiration.py b/providers/aws/services/directoryservice/directoryservice_ldap_certificate_expiration/directoryservice_ldap_certificate_expiration.py index 89687b11..0ec4b95a 100644 --- a/providers/aws/services/directoryservice/directoryservice_ldap_certificate_expiration/directoryservice_ldap_certificate_expiration.py +++ b/providers/aws/services/directoryservice/directoryservice_ldap_certificate_expiration/directoryservice_ldap_certificate_expiration.py @@ -14,7 +14,7 @@ class directoryservice_ldap_certificate_expiration(Check): findings = [] for directory in directoryservice_client.directories.values(): for certificate in directory.certificates: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = directory.region report.resource_id = certificate.id diff --git 
a/providers/aws/services/directoryservice/directoryservice_radius_server_security_protocol/directoryservice_radius_server_security_protocol.metadata.json b/providers/aws/services/directoryservice/directoryservice_radius_server_security_protocol/directoryservice_radius_server_security_protocol.metadata.json index 700e3825..a5bdf3cf 100644 --- a/providers/aws/services/directoryservice/directoryservice_radius_server_security_protocol/directoryservice_radius_server_security_protocol.metadata.json +++ b/providers/aws/services/directoryservice/directoryservice_radius_server_security_protocol/directoryservice_radius_server_security_protocol.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/directoryservice/directoryservice_radius_server_security_protocol/directoryservice_radius_server_security_protocol.py b/providers/aws/services/directoryservice/directoryservice_radius_server_security_protocol/directoryservice_radius_server_security_protocol.py index 1981792e..bffb3b9d 100644 --- a/providers/aws/services/directoryservice/directoryservice_radius_server_security_protocol/directoryservice_radius_server_security_protocol.py +++ b/providers/aws/services/directoryservice/directoryservice_radius_server_security_protocol/directoryservice_radius_server_security_protocol.py @@ -12,7 +12,7 @@ class directoryservice_radius_server_security_protocol(Check): findings = [] for directory in directoryservice_client.directories.values(): if directory.radius_settings: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = directory.region report.resource_id = directory.id if ( diff --git a/providers/aws/services/directoryservice/directoryservice_supported_mfa_radius_enabled/directoryservice_supported_mfa_radius_enabled.metadata.json b/providers/aws/services/directoryservice/directoryservice_supported_mfa_radius_enabled/directoryservice_supported_mfa_radius_enabled.metadata.json index d3fb4061..d2a6eeac 100644 --- a/providers/aws/services/directoryservice/directoryservice_supported_mfa_radius_enabled/directoryservice_supported_mfa_radius_enabled.metadata.json +++ b/providers/aws/services/directoryservice/directoryservice_supported_mfa_radius_enabled/directoryservice_supported_mfa_radius_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/directoryservice/directoryservice_supported_mfa_radius_enabled/directoryservice_supported_mfa_radius_enabled.py b/providers/aws/services/directoryservice/directoryservice_supported_mfa_radius_enabled/directoryservice_supported_mfa_radius_enabled.py index 9b04526f..f9f9bae6 100644 --- a/providers/aws/services/directoryservice/directoryservice_supported_mfa_radius_enabled/directoryservice_supported_mfa_radius_enabled.py +++ b/providers/aws/services/directoryservice/directoryservice_supported_mfa_radius_enabled/directoryservice_supported_mfa_radius_enabled.py @@ -12,7 +12,7 @@ class directoryservice_supported_mfa_radius_enabled(Check): findings = [] for directory in directoryservice_client.directories.values(): if directory.radius_settings: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = directory.region report.resource_id = directory.id if directory.radius_settings.status == RadiusStatus.Completed: diff --git 
a/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_encryption_enabled/dynamodb_accelerator_cluster_encryption_enabled.metadata.json b/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_encryption_enabled/dynamodb_accelerator_cluster_encryption_enabled.metadata.json index ab02243f..0d1c8c40 100644 --- a/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_encryption_enabled/dynamodb_accelerator_cluster_encryption_enabled.metadata.json +++ b/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_encryption_enabled/dynamodb_accelerator_cluster_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "dynamodb_accelerator_cluster_encryption_enabled", "CheckTitle": "Check if DynamoDB DAX Clusters are encrypted at rest.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "dynamodb", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:dynamodb:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_encryption_enabled/dynamodb_accelerator_cluster_encryption_enabled.py b/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_encryption_enabled/dynamodb_accelerator_cluster_encryption_enabled.py index ba5f5383..66f289c1 100644 --- a/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_encryption_enabled/dynamodb_accelerator_cluster_encryption_enabled.py +++ b/providers/aws/services/dynamodb/dynamodb_accelerator_cluster_encryption_enabled/dynamodb_accelerator_cluster_encryption_enabled.py @@ -6,7 +6,7 @@ class dynamodb_accelerator_cluster_encryption_enabled(Check): def execute(self): findings = [] for cluster in dax_client.clusters: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = cluster.name report.resource_arn = cluster.arn report.region = cluster.region diff --git a/providers/aws/services/dynamodb/dynamodb_tables_kms_cmk_encryption_enabled/dynamodb_tables_kms_cmk_encryption_enabled.metadata.json b/providers/aws/services/dynamodb/dynamodb_tables_kms_cmk_encryption_enabled/dynamodb_tables_kms_cmk_encryption_enabled.metadata.json index 2f5eae49..bd3fbcb2 100644 --- a/providers/aws/services/dynamodb/dynamodb_tables_kms_cmk_encryption_enabled/dynamodb_tables_kms_cmk_encryption_enabled.metadata.json +++ b/providers/aws/services/dynamodb/dynamodb_tables_kms_cmk_encryption_enabled/dynamodb_tables_kms_cmk_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "dynamodb_tables_kms_cmk_encryption_enabled", "CheckTitle": "Check if DynamoDB table has encryption at rest enabled using CMK KMS.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "dynamodb", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:dynamodb:region:account-id:table/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git 
a/providers/aws/services/dynamodb/dynamodb_tables_kms_cmk_encryption_enabled/dynamodb_tables_kms_cmk_encryption_enabled.py b/providers/aws/services/dynamodb/dynamodb_tables_kms_cmk_encryption_enabled/dynamodb_tables_kms_cmk_encryption_enabled.py index 71a8422a..c25381d3 100644 --- a/providers/aws/services/dynamodb/dynamodb_tables_kms_cmk_encryption_enabled/dynamodb_tables_kms_cmk_encryption_enabled.py +++ b/providers/aws/services/dynamodb/dynamodb_tables_kms_cmk_encryption_enabled/dynamodb_tables_kms_cmk_encryption_enabled.py @@ -6,7 +6,7 @@ class dynamodb_tables_kms_cmk_encryption_enabled(Check): def execute(self): findings = [] for table in dynamodb_client.tables: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = table.name report.resource_arn = table.arn report.region = table.region diff --git a/providers/aws/services/dynamodb/dynamodb_tables_pitr_enabled/dynamodb_tables_pitr_enabled.metadata.json b/providers/aws/services/dynamodb/dynamodb_tables_pitr_enabled/dynamodb_tables_pitr_enabled.metadata.json index fe20f228..fcc35a3b 100644 --- a/providers/aws/services/dynamodb/dynamodb_tables_pitr_enabled/dynamodb_tables_pitr_enabled.metadata.json +++ b/providers/aws/services/dynamodb/dynamodb_tables_pitr_enabled/dynamodb_tables_pitr_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "dynamodb_tables_pitr_enabled", "CheckTitle": "Check if DynamoDB tables point-in-time recovery (PITR) is enabled.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "dynamodb", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:dynamodb:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/dynamodb/dynamodb_tables_pitr_enabled/dynamodb_tables_pitr_enabled.py b/providers/aws/services/dynamodb/dynamodb_tables_pitr_enabled/dynamodb_tables_pitr_enabled.py index 1e48cd2a..b5fca8b9 100644 --- a/providers/aws/services/dynamodb/dynamodb_tables_pitr_enabled/dynamodb_tables_pitr_enabled.py +++ b/providers/aws/services/dynamodb/dynamodb_tables_pitr_enabled/dynamodb_tables_pitr_enabled.py @@ -6,7 +6,7 @@ class dynamodb_tables_pitr_enabled(Check): def execute(self): findings = [] for table in dynamodb_client.tables: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = table.name report.resource_arn = table.arn report.region = table.region diff --git a/providers/aws/services/ec2/ec2_ami_public/ec2_ami_public.metadata.json b/providers/aws/services/ec2/ec2_ami_public/ec2_ami_public.metadata.json index 6a6feba2..8c412b24 100644 --- a/providers/aws/services/ec2/ec2_ami_public/ec2_ami_public.metadata.json +++ b/providers/aws/services/ec2/ec2_ami_public/ec2_ami_public.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_ami_public", "CheckTitle": "Ensure there are no EC2 AMIs set as Public.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "ami", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git 
a/providers/aws/services/ec2/ec2_ami_public/ec2_ami_public.py b/providers/aws/services/ec2/ec2_ami_public/ec2_ami_public.py index 2320beba..0ba3504e 100644 --- a/providers/aws/services/ec2/ec2_ami_public/ec2_ami_public.py +++ b/providers/aws/services/ec2/ec2_ami_public/ec2_ami_public.py @@ -6,7 +6,7 @@ class ec2_ami_public(Check): def execute(self): findings = [] for image in ec2_client.images: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = image.region report.resource_id = image.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_ebs_default_encryption/ec2_ebs_default_encryption.metadata.json b/providers/aws/services/ec2/ec2_ebs_default_encryption/ec2_ebs_default_encryption.metadata.json index 336d3ec7..80c7da49 100644 --- a/providers/aws/services/ec2/ec2_ebs_default_encryption/ec2_ebs_default_encryption.metadata.json +++ b/providers/aws/services/ec2/ec2_ebs_default_encryption/ec2_ebs_default_encryption.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_ebs_default_encryption", "CheckTitle": "Check if EBS Default Encryption is activated.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "ec2", "SubServiceName": "ebs", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_ebs_default_encryption/ec2_ebs_default_encryption.py b/providers/aws/services/ec2/ec2_ebs_default_encryption/ec2_ebs_default_encryption.py index 08e885ad..55eb1c20 100644 --- a/providers/aws/services/ec2/ec2_ebs_default_encryption/ec2_ebs_default_encryption.py +++ b/providers/aws/services/ec2/ec2_ebs_default_encryption/ec2_ebs_default_encryption.py @@ -6,7 +6,7 @@ class ec2_ebs_default_encryption(Check): def execute(self): findings = [] for ebs_encryption in ec2_client.ebs_encryption_by_default: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = ebs_encryption.region report.resource_id = "EBS Default Encryption" report.status = "FAIL" diff --git a/providers/aws/services/ec2/ec2_ebs_public_snapshot/ec2_ebs_public_snapshot.metadata.json b/providers/aws/services/ec2/ec2_ebs_public_snapshot/ec2_ebs_public_snapshot.metadata.json index 84f7ebb3..8d893d20 100644 --- a/providers/aws/services/ec2/ec2_ebs_public_snapshot/ec2_ebs_public_snapshot.metadata.json +++ b/providers/aws/services/ec2/ec2_ebs_public_snapshot/ec2_ebs_public_snapshot.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_ebs_public_snapshot", "CheckTitle": "Ensure there are no EBS Snapshots set as Public.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "ec2", "SubServiceName": "snapshot", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_ebs_public_snapshot/ec2_ebs_public_snapshot.py b/providers/aws/services/ec2/ec2_ebs_public_snapshot/ec2_ebs_public_snapshot.py index 08170c76..6cccc2fb 100644 --- a/providers/aws/services/ec2/ec2_ebs_public_snapshot/ec2_ebs_public_snapshot.py +++ b/providers/aws/services/ec2/ec2_ebs_public_snapshot/ec2_ebs_public_snapshot.py @@ -6,7 +6,7 @@ class ec2_ebs_public_snapshot(Check): def execute(self): findings = [] for snapshot in ec2_client.snapshots: - report = 
Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = snapshot.region if not snapshot.public: report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_ebs_snapshots_encrypted/ec2_ebs_snapshots_encrypted.metadata.json b/providers/aws/services/ec2/ec2_ebs_snapshots_encrypted/ec2_ebs_snapshots_encrypted.metadata.json index b4862920..4c2afe8c 100644 --- a/providers/aws/services/ec2/ec2_ebs_snapshots_encrypted/ec2_ebs_snapshots_encrypted.metadata.json +++ b/providers/aws/services/ec2/ec2_ebs_snapshots_encrypted/ec2_ebs_snapshots_encrypted.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_ebs_snapshots_encrypted", "CheckTitle": "Check if EBS snapshots are encrypted.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "ec2", "SubServiceName": "snapshot", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_ebs_snapshots_encrypted/ec2_ebs_snapshots_encrypted.py b/providers/aws/services/ec2/ec2_ebs_snapshots_encrypted/ec2_ebs_snapshots_encrypted.py index 045a33e4..de944874 100644 --- a/providers/aws/services/ec2/ec2_ebs_snapshots_encrypted/ec2_ebs_snapshots_encrypted.py +++ b/providers/aws/services/ec2/ec2_ebs_snapshots_encrypted/ec2_ebs_snapshots_encrypted.py @@ -6,7 +6,7 @@ class ec2_ebs_snapshots_encrypted(Check): def execute(self): findings = [] for snapshot in ec2_client.snapshots: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = snapshot.region if snapshot.encrypted: report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_ebs_volume_encryption/ec2_ebs_volume_encryption.metadata.json b/providers/aws/services/ec2/ec2_ebs_volume_encryption/ec2_ebs_volume_encryption.metadata.json index ca60efe0..29d28fe5 100644 --- a/providers/aws/services/ec2/ec2_ebs_volume_encryption/ec2_ebs_volume_encryption.metadata.json +++ b/providers/aws/services/ec2/ec2_ebs_volume_encryption/ec2_ebs_volume_encryption.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_ebs_volume_encryption", "CheckTitle": "Ensure there are no EBS Volumes unencrypted.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "ec2", "SubServiceName": "volume", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_ebs_volume_encryption/ec2_ebs_volume_encryption.py b/providers/aws/services/ec2/ec2_ebs_volume_encryption/ec2_ebs_volume_encryption.py index d8e8543d..3a8eebdb 100644 --- a/providers/aws/services/ec2/ec2_ebs_volume_encryption/ec2_ebs_volume_encryption.py +++ b/providers/aws/services/ec2/ec2_ebs_volume_encryption/ec2_ebs_volume_encryption.py @@ -6,7 +6,7 @@ class ec2_ebs_volume_encryption(Check): def execute(self): findings = [] for volume in ec2_client.volumes: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = volume.region report.resource_id = volume.id if volume.encrypted: diff --git a/providers/aws/services/ec2/ec2_elastic_ip_shodan/ec2_elastic_ip_shodan.metadata.json b/providers/aws/services/ec2/ec2_elastic_ip_shodan/ec2_elastic_ip_shodan.metadata.json index 8210db88..688f69c8 100644 --- 
a/providers/aws/services/ec2/ec2_elastic_ip_shodan/ec2_elastic_ip_shodan.metadata.json +++ b/providers/aws/services/ec2/ec2_elastic_ip_shodan/ec2_elastic_ip_shodan.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_elastic_ip_shodan", "CheckTitle": "Check if any of the Elastic or Public IP are in Shodan (requires Shodan API KEY).", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_elastic_ip_shodan/ec2_elastic_ip_shodan.py b/providers/aws/services/ec2/ec2_elastic_ip_shodan/ec2_elastic_ip_shodan.py index 0db2bf2e..9f276abf 100644 --- a/providers/aws/services/ec2/ec2_elastic_ip_shodan/ec2_elastic_ip_shodan.py +++ b/providers/aws/services/ec2/ec2_elastic_ip_shodan/ec2_elastic_ip_shodan.py @@ -13,7 +13,7 @@ class ec2_elastic_ip_shodan(Check): if shodan_api_key: api = shodan.Shodan(shodan_api_key) for eip in ec2_client.elastic_ips: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = eip.region if eip.public_ip: try: diff --git a/providers/aws/services/ec2/ec2_elastic_ip_unassgined/ec2_elastic_ip_unassgined.metadata.json b/providers/aws/services/ec2/ec2_elastic_ip_unassgined/ec2_elastic_ip_unassgined.metadata.json index 66aeb023..f865eb43 100644 --- a/providers/aws/services/ec2/ec2_elastic_ip_unassgined/ec2_elastic_ip_unassgined.metadata.json +++ b/providers/aws/services/ec2/ec2_elastic_ip_unassgined/ec2_elastic_ip_unassgined.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_elastic_ip_unassgined", "CheckTitle": "Check if there is any unassigned Elastic IP.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "AwsElasticIPs", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_elastic_ip_unassgined/ec2_elastic_ip_unassgined.py b/providers/aws/services/ec2/ec2_elastic_ip_unassgined/ec2_elastic_ip_unassgined.py index 18353b9f..306ed1b6 100644 --- a/providers/aws/services/ec2/ec2_elastic_ip_unassgined/ec2_elastic_ip_unassgined.py +++ b/providers/aws/services/ec2/ec2_elastic_ip_unassgined/ec2_elastic_ip_unassgined.py @@ -6,7 +6,7 @@ class ec2_elastic_ip_unassgined(Check): def execute(self): findings = [] for eip in ec2_client.elastic_ips: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = eip.region if eip.public_ip: report.resource_id = eip.public_ip diff --git a/providers/aws/services/ec2/ec2_instance_imdsv2_enabled/ec2_instance_imdsv2_enabled.metadata.json b/providers/aws/services/ec2/ec2_instance_imdsv2_enabled/ec2_instance_imdsv2_enabled.metadata.json index 6cd364a3..2191005a 100644 --- a/providers/aws/services/ec2/ec2_instance_imdsv2_enabled/ec2_instance_imdsv2_enabled.metadata.json +++ b/providers/aws/services/ec2/ec2_instance_imdsv2_enabled/ec2_instance_imdsv2_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_instance_imdsv2_enabled", "CheckTitle": "Check if EC2 Instance Metadata Service Version 2 (IMDSv2) is Enabled and Required.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ 
+ "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_instance_imdsv2_enabled/ec2_instance_imdsv2_enabled.py b/providers/aws/services/ec2/ec2_instance_imdsv2_enabled/ec2_instance_imdsv2_enabled.py index 7c16365f..d795dc15 100644 --- a/providers/aws/services/ec2/ec2_instance_imdsv2_enabled/ec2_instance_imdsv2_enabled.py +++ b/providers/aws/services/ec2/ec2_instance_imdsv2_enabled/ec2_instance_imdsv2_enabled.py @@ -6,7 +6,7 @@ class ec2_instance_imdsv2_enabled(Check): def execute(self): findings = [] for instance in ec2_client.instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = instance.region report.resource_id = instance.id report.status = "FAIL" diff --git a/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.metadata.json b/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.metadata.json index cd4bf63c..07217ce1 100644 --- a/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.metadata.json +++ b/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_instance_internet_facing_with_instance_profile", "CheckTitle": "Check for internet facing EC2 instances with Instance Profiles attached.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.py b/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.py index e4a4a6bf..cd23f035 100644 --- a/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.py +++ b/providers/aws/services/ec2/ec2_instance_internet_facing_with_instance_profile/ec2_instance_internet_facing_with_instance_profile.py @@ -6,7 +6,7 @@ class ec2_instance_internet_facing_with_instance_profile(Check): def execute(self): findings = [] for instance in ec2_client.instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = instance.region report.resource_id = instance.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_instance_managed_by_ssm/ec2_instance_managed_by_ssm.metadata.json b/providers/aws/services/ec2/ec2_instance_managed_by_ssm/ec2_instance_managed_by_ssm.metadata.json index 8c6e24b7..6eaf4de3 100644 --- a/providers/aws/services/ec2/ec2_instance_managed_by_ssm/ec2_instance_managed_by_ssm.metadata.json +++ b/providers/aws/services/ec2/ec2_instance_managed_by_ssm/ec2_instance_managed_by_ssm.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_instance_managed_by_ssm", "CheckTitle": "Check if 
EC2 instances are managed by Systems Manager.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "instance", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_instance_managed_by_ssm/ec2_instance_managed_by_ssm.py b/providers/aws/services/ec2/ec2_instance_managed_by_ssm/ec2_instance_managed_by_ssm.py index e2387ccb..e47890ba 100644 --- a/providers/aws/services/ec2/ec2_instance_managed_by_ssm/ec2_instance_managed_by_ssm.py +++ b/providers/aws/services/ec2/ec2_instance_managed_by_ssm/ec2_instance_managed_by_ssm.py @@ -7,7 +7,7 @@ class ec2_instance_managed_by_ssm(Check): def execute(self): findings = [] for instance in ec2_client.instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = instance.region if not ssm_client.managed_instances.get(instance.id): report.status = "FAIL" diff --git a/providers/aws/services/ec2/ec2_instance_older_than_specific_days/ec2_instance_older_than_specific_days.metadata.json b/providers/aws/services/ec2/ec2_instance_older_than_specific_days/ec2_instance_older_than_specific_days.metadata.json index 93c5f90d..daaa5815 100644 --- a/providers/aws/services/ec2/ec2_instance_older_than_specific_days/ec2_instance_older_than_specific_days.metadata.json +++ b/providers/aws/services/ec2/ec2_instance_older_than_specific_days/ec2_instance_older_than_specific_days.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_instance_older_than_specific_days", "CheckTitle": "Check EC2 Instances older than specific days.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_instance_older_than_specific_days/ec2_instance_older_than_specific_days.py b/providers/aws/services/ec2/ec2_instance_older_than_specific_days/ec2_instance_older_than_specific_days.py index 3fbb680b..27bf9f96 100644 --- a/providers/aws/services/ec2/ec2_instance_older_than_specific_days/ec2_instance_older_than_specific_days.py +++ b/providers/aws/services/ec2/ec2_instance_older_than_specific_days/ec2_instance_older_than_specific_days.py @@ -10,7 +10,7 @@ class ec2_instance_older_than_specific_days(Check): findings = [] max_ec2_instance_age_in_days = get_config_var("max_ec2_instance_age_in_days") for instance in ec2_client.instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = instance.region report.resource_id = instance.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.metadata.json b/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.metadata.json index b1def736..b284c918 100644 --- a/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.metadata.json +++ b/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.metadata.json @@ -32,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git 
a/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.py b/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.py index fb3869ef..5cf832b7 100644 --- a/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.py +++ b/providers/aws/services/ec2/ec2_instance_profile_attached/ec2_instance_profile_attached.py @@ -6,7 +6,7 @@ class ec2_instance_profile_attached(Check): def execute(self): findings = [] for instance in ec2_client.instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = instance.region report.resource_id = instance.id report.status = "FAIL" diff --git a/providers/aws/services/ec2/ec2_instance_public_ip/ec2_instance_public_ip.metadata.json b/providers/aws/services/ec2/ec2_instance_public_ip/ec2_instance_public_ip.metadata.json index 08d0265b..7b6fc446 100644 --- a/providers/aws/services/ec2/ec2_instance_public_ip/ec2_instance_public_ip.metadata.json +++ b/providers/aws/services/ec2/ec2_instance_public_ip/ec2_instance_public_ip.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_instance_public_ip", "CheckTitle": "Check for EC2 Instances with Public IP.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "instance", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_instance_public_ip/ec2_instance_public_ip.py b/providers/aws/services/ec2/ec2_instance_public_ip/ec2_instance_public_ip.py index 49ada205..90ecb3bb 100644 --- a/providers/aws/services/ec2/ec2_instance_public_ip/ec2_instance_public_ip.py +++ b/providers/aws/services/ec2/ec2_instance_public_ip/ec2_instance_public_ip.py @@ -6,7 +6,7 @@ class ec2_instance_public_ip(Check): def execute(self): findings = [] for instance in ec2_client.instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = instance.region if instance.public_ip: report.status = "FAIL" diff --git a/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.metadata.json b/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.metadata.json index c1ca2f13..e3257a2c 100644 --- a/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.metadata.json +++ b/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_instance_secrets_user_data", "CheckTitle": "Find secrets in EC2 User Data.", - "CheckType": ["IAM"], + "CheckType": [ + "IAM" + ], "ServiceName": "ec2", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id", @@ -30,7 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - ] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.py b/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.py index d824a6da..184c0076 100644 --- a/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.py +++ b/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.py @@ -13,7 
+13,7 @@ class ec2_instance_secrets_user_data(Check): def execute(self): findings = [] for instance in ec2_client.instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = instance.region report.resource_id = instance.id diff --git a/providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/__init__.py b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/__init__.py similarity index 100% rename from providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/__init__.py rename to providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/__init__.py diff --git a/providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port.metadata.json b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port.metadata.json similarity index 83% rename from providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port.metadata.json rename to providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port.metadata.json index 0e486e20..7cfb1dd4 100644 --- a/providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port.metadata.json +++ b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port.metadata.json @@ -1,8 +1,12 @@ { "Provider": "aws", - "CheckID": "ec2_network_acls_allow_ingress_any_port", + "CheckID": "ec2_networkacl_allow_ingress_any_port", "CheckTitle": "Ensure no Network ACLs allow ingress from 0.0.0.0/0 to any port.", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards", "CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "ec2", "SubServiceName": "networkacl", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Infrastructure Security", - "Compliance": [] + "Notes": "Infrastructure Security" } diff --git a/providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port.py b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port.py similarity index 90% rename from providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port.py rename to providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port.py index 6c0b5e14..b64c27c7 100644 --- a/providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port.py +++ b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port.py @@ -3,13 +3,13 @@ from providers.aws.services.ec2.ec2_client import ec2_client from providers.aws.services.ec2.lib.network_acls import check_network_acl -class ec2_network_acls_allow_ingress_any_port(Check): +class ec2_networkacl_allow_ingress_any_port(Check): def execute(self): findings = [] tcp_protocol = "-1" check_port = 0 for network_acl in ec2_client.network_acls: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = network_acl.region report.resource_id = network_acl.id # If some entry allows it, that ACL is not securely 
configured diff --git a/providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port_test.py b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port_test.py similarity index 72% rename from providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port_test.py rename to providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port_test.py index 4b3b18d8..11c5f4da 100644 --- a/providers/aws/services/ec2/ec2_network_acls_allow_ingress_any_port/ec2_network_acls_allow_ingress_any_port_test.py +++ b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_any_port/ec2_networkacl_allow_ingress_any_port_test.py @@ -6,7 +6,7 @@ from moto import mock_ec2 AWS_REGION = "us-east-1" -class Test_ec2_network_acls_allow_ingress_any_port: +class Test_ec2_networkacl_allow_ingress_any_port: @mock_ec2 def test_ec2_default_nacls(self): @@ -16,15 +16,15 @@ class Test_ec2_network_acls_allow_ingress_any_port: current_audit_info.audited_partition = "aws" with mock.patch( - "providers.aws.services.ec2.ec2_network_acls_allow_ingress_any_port.ec2_network_acls_allow_ingress_any_port.ec2_client", + "providers.aws.services.ec2.ec2_networkacl_allow_ingress_any_port.ec2_networkacl_allow_ingress_any_port.ec2_client", new=EC2(current_audit_info), ): # Test Check - from providers.aws.services.ec2.ec2_network_acls_allow_ingress_any_port.ec2_network_acls_allow_ingress_any_port import ( - ec2_network_acls_allow_ingress_any_port, + from providers.aws.services.ec2.ec2_networkacl_allow_ingress_any_port.ec2_networkacl_allow_ingress_any_port import ( + ec2_networkacl_allow_ingress_any_port, ) - check = ec2_network_acls_allow_ingress_any_port() + check = ec2_networkacl_allow_ingress_any_port() result = check.execute() # One default nacl per region @@ -39,15 +39,15 @@ class Test_ec2_network_acls_allow_ingress_any_port: current_audit_info.audited_partition = "aws" with mock.patch( - "providers.aws.services.ec2.ec2_network_acls_allow_ingress_any_port.ec2_network_acls_allow_ingress_any_port.ec2_client", + "providers.aws.services.ec2.ec2_networkacl_allow_ingress_any_port.ec2_networkacl_allow_ingress_any_port.ec2_client", new=EC2(current_audit_info), ): # Test Check - from providers.aws.services.ec2.ec2_network_acls_allow_ingress_any_port.ec2_network_acls_allow_ingress_any_port import ( - ec2_network_acls_allow_ingress_any_port, + from providers.aws.services.ec2.ec2_networkacl_allow_ingress_any_port.ec2_networkacl_allow_ingress_any_port import ( + ec2_networkacl_allow_ingress_any_port, ) - check = ec2_network_acls_allow_ingress_any_port() + check = ec2_networkacl_allow_ingress_any_port() result = check.execute() # One default sg per region @@ -83,15 +83,15 @@ class Test_ec2_network_acls_allow_ingress_any_port: current_audit_info.audited_partition = "aws" with mock.patch( - "providers.aws.services.ec2.ec2_network_acls_allow_ingress_any_port.ec2_network_acls_allow_ingress_any_port.ec2_client", + "providers.aws.services.ec2.ec2_networkacl_allow_ingress_any_port.ec2_networkacl_allow_ingress_any_port.ec2_client", new=EC2(current_audit_info), ): # Test Check - from providers.aws.services.ec2.ec2_network_acls_allow_ingress_any_port.ec2_network_acls_allow_ingress_any_port import ( - ec2_network_acls_allow_ingress_any_port, + from providers.aws.services.ec2.ec2_networkacl_allow_ingress_any_port.ec2_networkacl_allow_ingress_any_port import ( +
ec2_networkacl_allow_ingress_any_port, ) - check = ec2_network_acls_allow_ingress_any_port() + check = ec2_networkacl_allow_ingress_any_port() result = check.execute() # One default sg per region + default of new VPC + new NACL @@ -128,15 +128,15 @@ class Test_ec2_network_acls_allow_ingress_any_port: current_audit_info.audited_partition = "aws" with mock.patch( - "providers.aws.services.ec2.ec2_network_acls_allow_ingress_any_port.ec2_network_acls_allow_ingress_any_port.ec2_client", + "providers.aws.services.ec2.ec2_networkacl_allow_ingress_any_port.ec2_networkacl_allow_ingress_any_port.ec2_client", new=EC2(current_audit_info), ): # Test Check - from providers.aws.services.ec2.ec2_network_acls_allow_ingress_any_port.ec2_network_acls_allow_ingress_any_port import ( - ec2_network_acls_allow_ingress_any_port, + from providers.aws.services.ec2.ec2_networkacl_allow_ingress_any_port.ec2_networkacl_allow_ingress_any_port import ( + ec2_networkacl_allow_ingress_any_port, ) - check = ec2_network_acls_allow_ingress_any_port() + check = ec2_networkacl_allow_ingress_any_port() result = check.execute() # One default sg per region + default of new VPC + new NACL diff --git a/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_22/ec2_networkacl_allow_ingress_tcp_port_22.metadata.json b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_22/ec2_networkacl_allow_ingress_tcp_port_22.metadata.json index db4f57fc..0ef674fc 100644 --- a/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_22/ec2_networkacl_allow_ingress_tcp_port_22.metadata.json +++ b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_22/ec2_networkacl_allow_ingress_tcp_port_22.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_networkacl_allow_ingress_tcp_port_22", "CheckTitle": "Ensure no Network ACLs allow ingress from 0.0.0.0/0 to SSH port 22", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "networkacl", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "4.5" - ], - "Framework": "CIS-AWS", - "Group": [ - "level2" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_22/ec2_networkacl_allow_ingress_tcp_port_22.py b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_22/ec2_networkacl_allow_ingress_tcp_port_22.py index 417bd2f3..a6c13a6a 100644 --- a/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_22/ec2_networkacl_allow_ingress_tcp_port_22.py +++ b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_22/ec2_networkacl_allow_ingress_tcp_port_22.py @@ -9,7 +9,7 @@ class ec2_networkacl_allow_ingress_tcp_port_22(Check): tcp_protocol = "6" check_port = 22 for network_acl in ec2_client.network_acls: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = network_acl.region # If some entry allows it, that ACL is not securely configured if not check_network_acl(network_acl.entries, tcp_protocol, check_port): diff --git a/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_3389/ec2_networkacl_allow_ingress_tcp_port_3389.metadata.json b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_3389/ec2_networkacl_allow_ingress_tcp_port_3389.metadata.json index 2f9d408a..62bc2eb1 100644 --- 
a/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_3389/ec2_networkacl_allow_ingress_tcp_port_3389.metadata.json +++ b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_3389/ec2_networkacl_allow_ingress_tcp_port_3389.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_networkacl_allow_ingress_tcp_port_3389", "CheckTitle": "Ensure no Network ACLs allow ingress from 0.0.0.0/0 to Microsoft RDP port 3389", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "networkacl", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "4.6" - ], - "Framework": "CIS-AWS", - "Group": [ - "level2" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_3389/ec2_networkacl_allow_ingress_tcp_port_3389.py b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_3389/ec2_networkacl_allow_ingress_tcp_port_3389.py index beba3101..0e1d664b 100644 --- a/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_3389/ec2_networkacl_allow_ingress_tcp_port_3389.py +++ b/providers/aws/services/ec2/ec2_networkacl_allow_ingress_tcp_port_3389/ec2_networkacl_allow_ingress_tcp_port_3389.py @@ -9,7 +9,7 @@ class ec2_networkacl_allow_ingress_tcp_port_3389(Check): tcp_protocol = "6" check_port = 3389 for network_acl in ec2_client.network_acls: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = network_acl.region # If some entry allows it, that ACL is not securely configured if not check_network_acl(network_acl.entries, tcp_protocol, check_port): diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_any_port/ec2_securitygroup_allow_ingress_from_internet_to_any_port.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_any_port/ec2_securitygroup_allow_ingress_from_internet_to_any_port.metadata.json index 25c9fd51..dacae077 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_any_port/ec2_securitygroup_allow_ingress_from_internet_to_any_port.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_any_port/ec2_securitygroup_allow_ingress_from_internet_to_any_port.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_any_port", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to any port.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_any_port/ec2_securitygroup_allow_ingress_from_internet_to_any_port.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_any_port/ec2_securitygroup_allow_ingress_from_internet_to_any_port.py index 422730e8..632857d8 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_any_port/ec2_securitygroup_allow_ingress_from_internet_to_any_port.py +++ 
b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_any_port/ec2_securitygroup_allow_ingress_from_internet_to_any_port.py @@ -7,7 +7,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_any_port(Check): def execute(self): findings = [] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.status = "PASS" report.status_extended = f"Security group {security_group.name} ({security_group.id}) has not all ports open to the Internet." diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018.metadata.json index 6ba6ea9a..13ce9542 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to MongoDB ports 27017 and 27018.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018.py index c8a84372..d08146e6 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018/ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_port_mongodb_27017_27018( findings = [] check_ports = [27017, 27018] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21.metadata.json index 31f56429..4f1c6740 100644 --- 
a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to FTP ports 20 or 21.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "4.1" - ], - "Framework": "CIS-AWS", - "Group": [ - "level2" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21.py index bf88ed18..15d6a83f 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21/ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_ftp_port_20_21(Check) findings = [] check_ports = [20, 21] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.status = "PASS" report.status_extended = f"Security group {security_group.name} ({security_group.id}) has not FTP ports 20 and 21 open to the Internet." 
diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22.metadata.json index abdce7a9..cd0cfb7a 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to SSH port 22.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "4.1" - ], - "Framework": "CIS-AWS", - "Group": [ - "level2" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22.py index 23a6aae7..33a5d8f3 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_22(Check): findings = [] check_ports = [22] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.status = "PASS" report.status_extended = f"Security group {security_group.name} ({security_group.id}) has not SSH port 22 open to the Internet." 
diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389.metadata.json index 6fdec1ad..67d25422 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to port 3389.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "4.2" - ], - "Framework": "CIS-AWS", - "Group": [ - "level2" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389.py index e75963fa..eddbe908 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_3389(Check): findings = [] check_ports = [3389] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.status = "PASS" report.status_extended = f"Security group {security_group.name} ({security_group.id}) has not Microsoft RDP port 3389 open to the Internet." 
diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888.metadata.json index f0549faa..c28ab1f2 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Cassandra ports 7199 or 9160 or 8888.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888.py index 6a093a43..83c9a250 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9160_8888.py @@ -10,7 +10,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_cassandra_7199_9 findings = [] check_ports = [7199, 9160, 8888] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601.metadata.json index 0d2c8eaf..85489996 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601.metadata.json +++ 
b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Elasticsearch/Kibana ports.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601.py index 0572319c..9f27cc03 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_kibana_9200_9300_5601.py @@ -10,7 +10,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_elasticsearch_ki findings = [] check_ports = [9200, 9300, 5601] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092.metadata.json index c2e0ad86..a39025aa 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Kafka port 9092.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git 
a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092.py index 8b34d0ae..22c96ac3 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092(Check findings = [] check_ports = [9092] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211.metadata.json index 36520e9d..272a8d59 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Memcached port 11211.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211.py index 6884fc8a..ae6810de 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_memcached_11211( findings = [] check_ports = [11211] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = 
security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306.metadata.json index e18b9716..2e5aa220 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to MySQL port 3306.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroups", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306.py index e04f1dbb..d517d390 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_mysql_3306(Check findings = [] check_ports = [3306] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483.metadata.json index cca503aa..92d10303 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483.metadata.json @@ -1,9 +1,10 @@ { "Provider": "aws", - "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Oracle ports 1521 or 2483.", - "CheckType": ["Infrastructure 
Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -31,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483.py index 6085c877..27eeccb1 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_oracle_1521_2483 findings = [] check_ports = [1521, 2483] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432.metadata.json index 58c3d238..67c751cf 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Postgres port 5432.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432.py index e941e2d7..741f2179 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432.py +++ 
b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_postgres_5432(Ch findings = [] check_ports = [5432] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379.metadata.json index a3f1953e..16df25c7 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Redis port 6379.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379.py index bb7612ed..5e88545f 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_redis_6379(Check findings = [] check_ports = [6379] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434.metadata.json index 980e7386..5d5afc27 100644 --- 
a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Windows SQL Server ports 1433 or 1434.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434.py index 3c0fbdaa..d8ad9ffb 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_1434.py @@ -10,7 +10,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_sql_server_1433_ findings = [] check_ports = [1433, 1434] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23.metadata.json index 0fc2bf12..632fa52c 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Telnet port 23.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git 
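# A minimal sketch of the Check / Check_Report pattern these hunks rely on, in
# which metadata() is a method that loads and returns the check's metadata
# instead of exposing it as a plain attribute. The CheckMetadata fields and the
# metadata-file lookup below are assumptions for illustration, not the actual
# lib/check/models.py implementation.
import json
import sys
from pathlib import Path


class CheckMetadata:
    # Subset of the keys present in every <check_id>.metadata.json above.
    def __init__(self, raw: dict):
        self.Provider = raw["Provider"]
        self.CheckID = raw["CheckID"]
        self.CheckTitle = raw["CheckTitle"]
        self.ServiceName = raw["ServiceName"]
        self.Severity = raw["Severity"]


class Check:
    def metadata(self) -> CheckMetadata:
        # Assumes <ClassName>.metadata.json sits next to the module that
        # defines the check, as in the directories touched by this patch.
        module_file = Path(sys.modules[self.__module__].__file__)
        metadata_file = module_file.with_name(f"{type(self).__name__}.metadata.json")
        return CheckMetadata(json.loads(metadata_file.read_text()))


class Check_Report:
    # Findings carry the metadata plus the fields each check sets above.
    def __init__(self, metadata: CheckMetadata):
        self.check_metadata = metadata
        self.status = ""
        self.region = ""
        self.resource_id = ""
        self.resource_arn = ""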
a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23.py b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23.py index 4a504686..2c71ffa0 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23/ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23.py @@ -8,7 +8,7 @@ class ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_telnet_23(Check) findings = [] check_ports = [23] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_wide_open_public_ipv4/ec2_securitygroup_allow_wide_open_public_ipv4.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_allow_wide_open_public_ipv4/ec2_securitygroup_allow_wide_open_public_ipv4.metadata.json index c2e0ad86..bdfefb95 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_wide_open_public_ipv4/ec2_securitygroup_allow_wide_open_public_ipv4.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_wide_open_public_ipv4/ec2_securitygroup_allow_wide_open_public_ipv4.metadata.json @@ -1,8 +1,10 @@ { "Provider": "aws", - "CheckID": "ec2_securitygroup_allow_ingress_from_internet_to_tcp_port_kafka_9092", + "CheckID": "ec2_securitygroup_allow_wide_open_public_ipv4", "CheckTitle": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Kafka port 9092.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_allow_wide_open_public_ipv4/ec2_securitygroup_allow_wide_open_public_ipv4.py b/providers/aws/services/ec2/ec2_securitygroup_allow_wide_open_public_ipv4/ec2_securitygroup_allow_wide_open_public_ipv4.py index e19d1182..f2d5dec6 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_allow_wide_open_public_ipv4/ec2_securitygroup_allow_wide_open_public_ipv4.py +++ b/providers/aws/services/ec2/ec2_securitygroup_allow_wide_open_public_ipv4/ec2_securitygroup_allow_wide_open_public_ipv4.py @@ -9,7 +9,7 @@ class ec2_securitygroup_allow_wide_open_public_ipv4(Check): findings = [] cidr_treshold = 24 for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_default_restrict_traffic/ec2_securitygroup_default_restrict_traffic.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_default_restrict_traffic/ec2_securitygroup_default_restrict_traffic.metadata.json index d13bbb79..c4555d9f 100644 --- 
a/providers/aws/services/ec2/ec2_securitygroup_default_restrict_traffic/ec2_securitygroup_default_restrict_traffic.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_default_restrict_traffic/ec2_securitygroup_default_restrict_traffic.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_default_restrict_traffic", "CheckTitle": "Ensure the default security group of every VPC restricts all traffic.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_default_restrict_traffic/ec2_securitygroup_default_restrict_traffic.py b/providers/aws/services/ec2/ec2_securitygroup_default_restrict_traffic/ec2_securitygroup_default_restrict_traffic.py index 4f5906b7..d93dfaad 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_default_restrict_traffic/ec2_securitygroup_default_restrict_traffic.py +++ b/providers/aws/services/ec2/ec2_securitygroup_default_restrict_traffic/ec2_securitygroup_default_restrict_traffic.py @@ -7,7 +7,7 @@ class ec2_securitygroup_default_restrict_traffic(Check): def execute(self): findings = [] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id # Find default security group diff --git a/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard.metadata.json index 2c07914b..49e5d180 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_from_launch_wizard", "CheckTitle": "Security Groups created by EC2 Launch Wizard.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard.py b/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard.py index 2f6e09e8..65faace5 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard.py +++ b/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard.py @@ -6,7 +6,7 @@ class ec2_securitygroup_from_launch_wizard(Check): def execute(self): findings = [] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git 
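# The "Compliance": [] key removed from every metadata.json in these hunks is
# superseded by stand-alone framework files under compliance/aws/ that get
# attached to checks when they are loaded. A sketch of that idea, assuming each
# framework file maps requirement identifiers to the CheckIDs they cover; the
# Framework/Requirements/Checks field names are illustrative, not the exact
# schema of compliance/aws/cis_1.4_aws.json.
import json
from collections import defaultdict
from pathlib import Path


def load_compliance_index(framework_dir: str) -> dict:
    """Return a CheckID -> ["<Framework> <Requirement-Id>", ...] mapping."""
    index = defaultdict(list)
    for framework_file in sorted(Path(framework_dir).glob("*.json")):
        framework = json.loads(framework_file.read_text())
        for requirement in framework.get("Requirements", []):
            for check_id in requirement.get("Checks", []):
                index[check_id].append(f"{framework['Framework']} {requirement['Id']}")
    return dict(index)


# Usage sketch: tag a finding with the requirements that reference its check.
# compliance_index = load_compliance_index("compliance/aws")
# requirements = compliance_index.get("ec2_securitygroup_default_restrict_traffic", [])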
a/providers/aws/services/ec2/ec2_securitygroup_in_use_without_ingress_filtering/ec2_securitygroup_in_use_without_ingress_filtering.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_in_use_without_ingress_filtering/ec2_securitygroup_in_use_without_ingress_filtering.metadata.json index 3eb399c9..e9af21b4 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_in_use_without_ingress_filtering/ec2_securitygroup_in_use_without_ingress_filtering.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_in_use_without_ingress_filtering/ec2_securitygroup_in_use_without_ingress_filtering.metadata.json @@ -1,8 +1,10 @@ { "Provider": "aws", - "CheckID": "ec2_securitygroup_with_many_ingress_egress_rules", + "CheckID": "ec2_securitygroup_in_use_without_ingress_filtering", "CheckTitle": "Ensure there are no Security Groups without ingress filtering being used.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_in_use_without_ingress_filtering/ec2_securitygroup_in_use_without_ingress_filtering.py b/providers/aws/services/ec2/ec2_securitygroup_in_use_without_ingress_filtering/ec2_securitygroup_in_use_without_ingress_filtering.py index 94a1c57f..e36688dc 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_in_use_without_ingress_filtering/ec2_securitygroup_in_use_without_ingress_filtering.py +++ b/providers/aws/services/ec2/ec2_securitygroup_in_use_without_ingress_filtering/ec2_securitygroup_in_use_without_ingress_filtering.py @@ -7,7 +7,7 @@ class ec2_securitygroup_in_use_without_ingress_filtering(Check): def execute(self): findings = [] for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_not_used/ec2_securitygroup_not_used.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_not_used/ec2_securitygroup_not_used.metadata.json index eece52fa..0e448e15 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_not_used/ec2_securitygroup_not_used.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_not_used/ec2_securitygroup_not_used.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_not_used", "CheckTitle": "Ensure there are no Security Groups not being used.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_not_used/ec2_securitygroup_not_used.py b/providers/aws/services/ec2/ec2_securitygroup_not_used/ec2_securitygroup_not_used.py index 0ce5652b..b6543384 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_not_used/ec2_securitygroup_not_used.py +++ b/providers/aws/services/ec2/ec2_securitygroup_not_used/ec2_securitygroup_not_used.py @@ -6,7 +6,7 @@ class ec2_securitygroup_not_used(Check): def execute(self): findings = [] for 
security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ec2/ec2_securitygroup_with_many_ingress_egress_rules/ec2_securitygroup_with_many_ingress_egress_rules.metadata.json b/providers/aws/services/ec2/ec2_securitygroup_with_many_ingress_egress_rules/ec2_securitygroup_with_many_ingress_egress_rules.metadata.json index b94c5e0e..66bec0b1 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_with_many_ingress_egress_rules/ec2_securitygroup_with_many_ingress_egress_rules.metadata.json +++ b/providers/aws/services/ec2/ec2_securitygroup_with_many_ingress_egress_rules/ec2_securitygroup_with_many_ingress_egress_rules.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "ec2_securitygroup_with_many_ingress_egress_rules", "CheckTitle": "Find security groups with more than 50 ingress or egress rules.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "ec2", "SubServiceName": "securitygroup", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ec2/ec2_securitygroup_with_many_ingress_egress_rules/ec2_securitygroup_with_many_ingress_egress_rules.py b/providers/aws/services/ec2/ec2_securitygroup_with_many_ingress_egress_rules/ec2_securitygroup_with_many_ingress_egress_rules.py index 83825e08..b9101ad5 100644 --- a/providers/aws/services/ec2/ec2_securitygroup_with_many_ingress_egress_rules/ec2_securitygroup_with_many_ingress_egress_rules.py +++ b/providers/aws/services/ec2/ec2_securitygroup_with_many_ingress_egress_rules/ec2_securitygroup_with_many_ingress_egress_rules.py @@ -8,7 +8,7 @@ class ec2_securitygroup_with_many_ingress_egress_rules(Check): findings = [] max_security_group_rules = get_config_var("max_security_group_rules") for security_group in ec2_client.security_groups: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = security_group.region report.resource_id = security_group.id report.status = "PASS" diff --git a/providers/aws/services/ecr/ecr_repositories_lifecycle_policy_enabled/ecr_repositories_lifecycle_policy_enabled.metadata.json b/providers/aws/services/ecr/ecr_repositories_lifecycle_policy_enabled/ecr_repositories_lifecycle_policy_enabled.metadata.json index 7d35f3f9..8c05043e 100644 --- a/providers/aws/services/ecr/ecr_repositories_lifecycle_policy_enabled/ecr_repositories_lifecycle_policy_enabled.metadata.json +++ b/providers/aws/services/ecr/ecr_repositories_lifecycle_policy_enabled/ecr_repositories_lifecycle_policy_enabled.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "ecr_repositories_lifecycle_policy_enabled", - "CheckTitle": "Check if ECR repositories have lifecycle policies enabled", - "CheckType": ["Identify", "Resource configuration"], - "ServiceName": "ecr", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "low", - "ResourceType": "AwsEcrRepository", - "Description": "Check if ECR repositories have lifecycle policies enabled", - "Risk": "Amazon ECR repositories run the risk of retaining huge volumes of images, increasing unnecessary cost.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": 
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ECR/lifecycle-policy-in-use.html", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Open the Amazon ECR console. Create an ECR lifecycle policy.", - "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html" - } + "Provider": "aws", + "CheckID": "ecr_repositories_lifecycle_policy_enabled", + "CheckTitle": "Check if ECR repositories have lifecycle policies enabled", + "CheckType": [ + "Identify", + "Resource configuration" + ], + "ServiceName": "ecr", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "low", + "ResourceType": "AwsEcrRepository", + "Description": "Check if ECR repositories have lifecycle policies enabled", + "Risk": "Amazon ECR repositories run the risk of retaining huge volumes of images, increasing unnecessary cost.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ECR/lifecycle-policy-in-use.html", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Open the Amazon ECR console. Create an ECR lifecycle policy.", + "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/ecr/ecr_repositories_lifecycle_policy_enabled/ecr_repositories_lifecycle_policy_enabled.py b/providers/aws/services/ecr/ecr_repositories_lifecycle_policy_enabled/ecr_repositories_lifecycle_policy_enabled.py index 7493f8d3..e294c4b6 100644 --- a/providers/aws/services/ecr/ecr_repositories_lifecycle_policy_enabled/ecr_repositories_lifecycle_policy_enabled.py +++ b/providers/aws/services/ecr/ecr_repositories_lifecycle_policy_enabled/ecr_repositories_lifecycle_policy_enabled.py @@ -6,7 +6,7 @@ class ecr_repositories_lifecycle_policy_enabled(Check): def execute(self): findings = [] for repository in ecr_client.repositories: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = repository.region report.resource_id = repository.name report.resource_arn = repository.arn diff --git a/providers/aws/services/ecr/ecr_repositories_not_publicly_accessible/ecr_repositories_not_publicly_accessible.metadata.json b/providers/aws/services/ecr/ecr_repositories_not_publicly_accessible/ecr_repositories_not_publicly_accessible.metadata.json index 30eb3f14..48a8edeb 100644 --- a/providers/aws/services/ecr/ecr_repositories_not_publicly_accessible/ecr_repositories_not_publicly_accessible.metadata.json +++ b/providers/aws/services/ecr/ecr_repositories_not_publicly_accessible/ecr_repositories_not_publicly_accessible.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "ecr_repositories_not_publicly_accessible", - "CheckTitle": "Ensure there are no ECR repositories set as Public", - "CheckType": ["Protect", "Secure Access Management"], - "ServiceName": "ecr", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "critical", - "ResourceType": "AwsEcrRepository", - "Description": "Ensure there are no ECR repositories set as Public", - "Risk": "A 
repository policy that allows anonymous access may allow anonymous users to perform actions.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "https://docs.bridgecrew.io/docs/public_1-ecr-repositories-not-public#cloudformation", - "Other": "https://docs.bridgecrew.io/docs/public_1-ecr-repositories-not-public#aws-console", - "Terraform": "" - }, - "Recommendation": { - "Text": "Ensure the repository and its contents are not publicly accessible", - "Url": "https://docs.aws.amazon.com/AmazonECR/latest/public/security_iam_service-with-iam.html" - } + "Provider": "aws", + "CheckID": "ecr_repositories_not_publicly_accessible", + "CheckTitle": "Ensure there are no ECR repositories set as Public", + "CheckType": [ + "Protect", + "Secure Access Management" + ], + "ServiceName": "ecr", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "critical", + "ResourceType": "AwsEcrRepository", + "Description": "Ensure there are no ECR repositories set as Public", + "Risk": "A repository policy that allows anonymous access may allow anonymous users to perform actions.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "https://docs.bridgecrew.io/docs/public_1-ecr-repositories-not-public#cloudformation", + "Other": "https://docs.bridgecrew.io/docs/public_1-ecr-repositories-not-public#aws-console", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Ensure the repository and its contents are not publicly accessible", + "Url": "https://docs.aws.amazon.com/AmazonECR/latest/public/security_iam_service-with-iam.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/ecr/ecr_repositories_not_publicly_accessible/ecr_repositories_not_publicly_accessible.py b/providers/aws/services/ecr/ecr_repositories_not_publicly_accessible/ecr_repositories_not_publicly_accessible.py index fcf20d7c..197c8310 100644 --- a/providers/aws/services/ecr/ecr_repositories_not_publicly_accessible/ecr_repositories_not_publicly_accessible.py +++ b/providers/aws/services/ecr/ecr_repositories_not_publicly_accessible/ecr_repositories_not_publicly_accessible.py @@ -6,7 +6,7 @@ class ecr_repositories_not_publicly_accessible(Check): def execute(self): findings = [] for repository in ecr_client.repositories: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = repository.region report.resource_id = repository.name report.resource_arn = repository.arn diff --git a/providers/aws/services/ecr/ecr_repositories_scan_images_on_push_enabled/ecr_repositories_scan_images_on_push_enabled.metadata.json b/providers/aws/services/ecr/ecr_repositories_scan_images_on_push_enabled/ecr_repositories_scan_images_on_push_enabled.metadata.json index 9bf6cd5f..00b142f3 100644 --- a/providers/aws/services/ecr/ecr_repositories_scan_images_on_push_enabled/ecr_repositories_scan_images_on_push_enabled.metadata.json +++ b/providers/aws/services/ecr/ecr_repositories_scan_images_on_push_enabled/ecr_repositories_scan_images_on_push_enabled.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "ecr_repositories_scan_images_on_push_enabled", - "CheckTitle": "Check if ECR image scan on push is enabled", - "CheckType": 
["Identify", "Vulnerability, patch, and version management"], - "ServiceName": "ecr", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsEcrRepository", - "Description": "Check if ECR image scan on push is enabled", - "Risk": "Amazon ECR image scanning helps in identifying software vulnerabilities in your container images. Amazon ECR uses the Common Vulnerabilities and Exposures (CVEs) database from the open-source Clair project and provides a list of scan findings. ", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws ecr create-repository --repository-name --image-scanning-configuration scanOnPush=true--region ", - "NativeIaC": "https://docs.bridgecrew.io/docs/general_8#cli-command", - "Other": "", - "Terraform": "https://docs.bridgecrew.io/docs/general_8#fix---buildtime" - }, - "Recommendation": { - "Text": "Enable ECR image scanning and review the scan findings for information about the security of the container images that are being deployed.", - "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" - } + "Provider": "aws", + "CheckID": "ecr_repositories_scan_images_on_push_enabled", + "CheckTitle": "Check if ECR image scan on push is enabled", + "CheckType": [ + "Identify", + "Vulnerability, patch, and version management" + ], + "ServiceName": "ecr", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEcrRepository", + "Description": "Check if ECR image scan on push is enabled", + "Risk": "Amazon ECR image scanning helps in identifying software vulnerabilities in your container images. Amazon ECR uses the Common Vulnerabilities and Exposures (CVEs) database from the open-source Clair project and provides a list of scan findings. 
", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws ecr create-repository --repository-name --image-scanning-configuration scanOnPush=true--region ", + "NativeIaC": "https://docs.bridgecrew.io/docs/general_8#cli-command", + "Other": "", + "Terraform": "https://docs.bridgecrew.io/docs/general_8#fix---buildtime" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable ECR image scanning and review the scan findings for information about the security of the container images that are being deployed.", + "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/ecr/ecr_repositories_scan_images_on_push_enabled/ecr_repositories_scan_images_on_push_enabled.py b/providers/aws/services/ecr/ecr_repositories_scan_images_on_push_enabled/ecr_repositories_scan_images_on_push_enabled.py index 5d28a9e8..3ea725e8 100644 --- a/providers/aws/services/ecr/ecr_repositories_scan_images_on_push_enabled/ecr_repositories_scan_images_on_push_enabled.py +++ b/providers/aws/services/ecr/ecr_repositories_scan_images_on_push_enabled/ecr_repositories_scan_images_on_push_enabled.py @@ -6,7 +6,7 @@ class ecr_repositories_scan_images_on_push_enabled(Check): def execute(self): findings = [] for repository in ecr_client.repositories: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = repository.region report.resource_id = repository.name report.resource_arn = repository.arn diff --git a/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.metadata.json b/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.metadata.json index 7ef75831..95a2a1d6 100644 --- a/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.metadata.json +++ b/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "ecr_repositories_scan_vulnerabilities_in_latest_image", - "CheckTitle": "Check if ECR image scan found vulnerabilities in the newest image version", - "CheckType": ["Identify", "Vulnerability, patch, and version management"], - "ServiceName": "ecr", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsEcrRepository", - "Description": "Check if ECR image scan found vulnerabilities in the newest image version", - "Risk": "Amazon ECR image scanning helps in identifying software vulnerabilities in your container images. Amazon ECR uses the Common Vulnerabilities and Exposures (CVEs) database from the open-source Clair project and provides a list of scan findings.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Open the Amazon ECR console. 
Then look for vulnerabilities and fix them.", - "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#describe-scan-findings" - } + "Provider": "aws", + "CheckID": "ecr_repositories_scan_vulnerabilities_in_latest_image", + "CheckTitle": "Check if ECR image scan found vulnerabilities in the newest image version", + "CheckType": [ + "Identify", + "Vulnerability, patch, and version management" + ], + "ServiceName": "ecr", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEcrRepository", + "Description": "Check if ECR image scan found vulnerabilities in the newest image version", + "Risk": "Amazon ECR image scanning helps in identifying software vulnerabilities in your container images. Amazon ECR uses the Common Vulnerabilities and Exposures (CVEs) database from the open-source Clair project and provides a list of scan findings.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Open the Amazon ECR console. Then look for vulnerabilities and fix them.", + "Url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#describe-scan-findings" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py b/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py index af0694fc..5a6e68c0 100644 --- a/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py +++ b/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py @@ -7,7 +7,7 @@ class ecr_repositories_scan_vulnerabilities_in_latest_image(Check): findings = [] for repository in ecr_client.repositories: for image in repository.images_details: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = repository.region report.resource_id = repository.name report.resource_arn = repository.arn diff --git a/providers/aws/services/ecs/ecs_task_definitions_no_environment_secrets/ecs_task_definitions_no_environment_secrets.metadata.json b/providers/aws/services/ecs/ecs_task_definitions_no_environment_secrets/ecs_task_definitions_no_environment_secrets.metadata.json index 98a0c6d7..30e26524 100644 --- a/providers/aws/services/ecs/ecs_task_definitions_no_environment_secrets/ecs_task_definitions_no_environment_secrets.metadata.json +++ b/providers/aws/services/ecs/ecs_task_definitions_no_environment_secrets/ecs_task_definitions_no_environment_secrets.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "ecs_task_definitions_no_environment_secrets", - "CheckTitle": "Check if secrets exists in ECS task definitions environment variables", - "CheckType": ["Protect", "Secure development", "Credentials not hard-coded"], - "ServiceName": "ecs", - "SubServiceName": "", - "ResourceIdTemplate": 
"arn:partition:service:region:account-id:resource-id", - "Severity": "critical", - "ResourceType": "AwsEcsTaskDefinition", - "Description": "Check if secrets exists in ECS task definitions environment variables", - "Risk": "The use of a hard-coded password increases the possibility of password guessing. If hard-coded passwords are used; it is possible that malicious users gain access through the account in question.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Use Secrets Manager or Parameter Store to securely provide credentials to containers without hardcoding the secrets in code or passing them through environment variables.", - "Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html" - } + "Provider": "aws", + "CheckID": "ecs_task_definitions_no_environment_secrets", + "CheckTitle": "Check if secrets exists in ECS task definitions environment variables", + "CheckType": [ + "Protect", + "Secure development", + "Credentials not hard-coded" + ], + "ServiceName": "ecs", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "critical", + "ResourceType": "AwsEcsTaskDefinition", + "Description": "Check if secrets exists in ECS task definitions environment variables", + "Risk": "The use of a hard-coded password increases the possibility of password guessing. If hard-coded passwords are used; it is possible that malicious users gain access through the account in question.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Use Secrets Manager or Parameter Store to securely provide credentials to containers without hardcoding the secrets in code or passing them through environment variables.", + "Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/ecs/ecs_task_definitions_no_environment_secrets/ecs_task_definitions_no_environment_secrets.py b/providers/aws/services/ecs/ecs_task_definitions_no_environment_secrets/ecs_task_definitions_no_environment_secrets.py index 800aed61..8cae04d3 100644 --- a/providers/aws/services/ecs/ecs_task_definitions_no_environment_secrets/ecs_task_definitions_no_environment_secrets.py +++ b/providers/aws/services/ecs/ecs_task_definitions_no_environment_secrets/ecs_task_definitions_no_environment_secrets.py @@ -13,7 +13,7 @@ class ecs_task_definitions_no_environment_secrets(Check): def execute(self): findings = [] for task_definition in ecs_client.task_definitions: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = task_definition.region report.resource_id = task_definition.name report.resource_arn = task_definition.arn diff --git a/providers/aws/services/efs/efs_encryption_at_rest_enabled/efs_encryption_at_rest_enabled.metadata.json b/providers/aws/services/efs/efs_encryption_at_rest_enabled/efs_encryption_at_rest_enabled.metadata.json index cb6b1a5a..efb6f099 100644 --- 
a/providers/aws/services/efs/efs_encryption_at_rest_enabled/efs_encryption_at_rest_enabled.metadata.json +++ b/providers/aws/services/efs/efs_encryption_at_rest_enabled/efs_encryption_at_rest_enabled.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "efs_encryption_at_rest_enabled", - "CheckTitle": "Check if EFS protects sensitive data with encryption at rest", - "CheckType": ["Protect", "Data protection", "Encryption of data at rest"], - "ServiceName": "efs", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsEFSFileSystem", - "Description": "Check if EFS protects sensitive data with encryption at rest", - "Risk": "EFS should be encrypted at rest to prevent exposure of sensitive data to bad actors", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws efs create-file-system --creation-token $(uuidgen) --performance-mode generalPurpose --encrypted --kms-key-id user/customer-managedCMKalias", - "NativeIaC": "https://docs.bridgecrew.io/docs/general_17#cloudformation", - "Other": "", - "Terraform": "https://docs.bridgecrew.io/docs/general_17#terraform" - }, - "Recommendation": { - "Text": "Ensure that encryption at rest is enabled for EFS file systems. Encryption at rest can only be enabled during the file system creation.", - "Url": "https://docs.aws.amazon.com/efs/latest/ug/encryption-at-rest.html" - } + "Provider": "aws", + "CheckID": "efs_encryption_at_rest_enabled", + "CheckTitle": "Check if EFS protects sensitive data with encryption at rest", + "CheckType": [ + "Protect", + "Data protection", + "Encryption of data at rest" + ], + "ServiceName": "efs", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEFSFileSystem", + "Description": "Check if EFS protects sensitive data with encryption at rest", + "Risk": "EFS should be encrypted at rest to prevent exposure of sensitive data to bad actors", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws efs create-file-system --creation-token $(uuidgen) --performance-mode generalPurpose --encrypted --kms-key-id user/customer-managedCMKalias", + "NativeIaC": "https://docs.bridgecrew.io/docs/general_17#cloudformation", + "Other": "", + "Terraform": "https://docs.bridgecrew.io/docs/general_17#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Ensure that encryption at rest is enabled for EFS file systems. 
Encryption at rest can only be enabled during the file system creation.", + "Url": "https://docs.aws.amazon.com/efs/latest/ug/encryption-at-rest.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/efs/efs_encryption_at_rest_enabled/efs_encryption_at_rest_enabled.py b/providers/aws/services/efs/efs_encryption_at_rest_enabled/efs_encryption_at_rest_enabled.py index 0608e74b..cd1ff8b2 100644 --- a/providers/aws/services/efs/efs_encryption_at_rest_enabled/efs_encryption_at_rest_enabled.py +++ b/providers/aws/services/efs/efs_encryption_at_rest_enabled/efs_encryption_at_rest_enabled.py @@ -6,7 +6,7 @@ class efs_encryption_at_rest_enabled(Check): def execute(self): findings = [] for fs in efs_client.filesystems: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = fs.region report.resource_id = fs.id report.resource_arn = "" diff --git a/providers/aws/services/efs/efs_have_backup_enabled/efs_have_backup_enabled.metadata.json b/providers/aws/services/efs/efs_have_backup_enabled/efs_have_backup_enabled.metadata.json index 19d039df..31c48680 100644 --- a/providers/aws/services/efs/efs_have_backup_enabled/efs_have_backup_enabled.metadata.json +++ b/providers/aws/services/efs/efs_have_backup_enabled/efs_have_backup_enabled.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "efs_have_backup_enabled", - "CheckTitle": "Check if EFS File systems have backup enabled", - "CheckType": ["Recover", "Resilience", "Backup"], - "ServiceName": "efs", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsEFSFileSystem", - "Description": "Check if EFS File systems have backup enabled", - "Risk": "If backup is not enabled, data is vulnerable. Human error or bad actors could erase or modify data.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Enable automated backup for production data. Define a retention period and periodically test backup restoration. A Disaster Recovery process should be in place to govern Data Protection approach.", - "Url": "https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html" - } + "Provider": "aws", + "CheckID": "efs_have_backup_enabled", + "CheckTitle": "Check if EFS File systems have backup enabled", + "CheckType": [ + "Recover", + "Resilience", + "Backup" + ], + "ServiceName": "efs", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEFSFileSystem", + "Description": "Check if EFS File systems have backup enabled", + "Risk": "If backup is not enabled, data is vulnerable. Human error or bad actors could erase or modify data.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable automated backup for production data. Define a retention period and periodically test backup restoration. 
A Disaster Recovery process should be in place to govern Data Protection approach.", + "Url": "https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/efs/efs_have_backup_enabled/efs_have_backup_enabled.py b/providers/aws/services/efs/efs_have_backup_enabled/efs_have_backup_enabled.py index 98b0ab08..1d06b572 100644 --- a/providers/aws/services/efs/efs_have_backup_enabled/efs_have_backup_enabled.py +++ b/providers/aws/services/efs/efs_have_backup_enabled/efs_have_backup_enabled.py @@ -6,7 +6,7 @@ class efs_have_backup_enabled(Check): def execute(self): findings = [] for fs in efs_client.filesystems: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = fs.region report.resource_id = fs.id report.resource_arn = "" diff --git a/providers/aws/services/efs/efs_not_publicly_accessible/efs_not_publicly_accessible.metadata.json b/providers/aws/services/efs/efs_not_publicly_accessible/efs_not_publicly_accessible.metadata.json index df9a1e1d..019b4b20 100644 --- a/providers/aws/services/efs/efs_not_publicly_accessible/efs_not_publicly_accessible.metadata.json +++ b/providers/aws/services/efs/efs_not_publicly_accessible/efs_not_publicly_accessible.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "efs_not_publicly_accessible", - "CheckTitle": "Check if EFS have policies which allow access to everyone", - "CheckType": ["Protect", "Data protection"], - "ServiceName": "efs", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "critical", - "ResourceType": "AwsEFSFileSystem", - "Description": "Check if EFS have policies which allow access to everyone", - "Risk": "EFS accessible to everyone could expose sensitive data to bad actors", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Ensure efs has some policy but it does not have principle as *", - "Url": "https://docs.aws.amazon.com/efs/latest/ug/access-control-block-public-access.html" - } + "Provider": "aws", + "CheckID": "efs_not_publicly_accessible", + "CheckTitle": "Check if EFS have policies which allow access to everyone", + "CheckType": [ + "Protect", + "Data protection" + ], + "ServiceName": "efs", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "critical", + "ResourceType": "AwsEFSFileSystem", + "Description": "Check if EFS have policies which allow access to everyone", + "Risk": "EFS accessible to everyone could expose sensitive data to bad actors", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Ensure efs has some policy but it does not have principle as *", + "Url": "https://docs.aws.amazon.com/efs/latest/ug/access-control-block-public-access.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/efs/efs_not_publicly_accessible/efs_not_publicly_accessible.py 
b/providers/aws/services/efs/efs_not_publicly_accessible/efs_not_publicly_accessible.py index 836b91ef..2427c744 100644 --- a/providers/aws/services/efs/efs_not_publicly_accessible/efs_not_publicly_accessible.py +++ b/providers/aws/services/efs/efs_not_publicly_accessible/efs_not_publicly_accessible.py @@ -6,7 +6,7 @@ class efs_not_publicly_accessible(Check): def execute(self): findings = [] for fs in efs_client.filesystems: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = fs.region report.resource_id = fs.id report.resource_arn = "" diff --git a/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.metadata.json b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.metadata.json index 5758d038..6ffe0aae 100644 --- a/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.metadata.json +++ b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "eks_cluster_kms_cmk_encryption_in_secrets_enabled", - "CheckTitle": "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs)", - "CheckType": ["Protect" , "Data protection"], - "ServiceName": "eks", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsEksCluster", - "Description": "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs)", - "Risk": "Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/EKS/enable-envelope-encryption.html", - "NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_kubernetes_3#fix---builtime", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Setup your own Customer Master Key (CMK) in KMS and link this key by providing the CMK ARN when you create an EKS cluster.", - "Url": "https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html" - } + "Provider": "aws", + "CheckID": "eks_cluster_kms_cmk_encryption_in_secrets_enabled", + "CheckTitle": "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs)", + "CheckType": [ + "Protect", + "Data protection" + ], + "ServiceName": "eks", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEksCluster", + "Description": "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs)", + "Risk": "Implementing envelope encryption is considered a security best practice for applications that store sensitive data and is part of a defense in depth security strategy.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/EKS/enable-envelope-encryption.html", + "NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_kubernetes_3#fix---builtime", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - 
"Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Setup your own Customer Master Key (CMK) in KMS and link this key by providing the CMK ARN when you create an EKS cluster.", + "Url": "https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.py b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.py index 633def6d..40290024 100644 --- a/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.py +++ b/providers/aws/services/eks/eks_cluster_kms_cmk_encryption_in_secrets_enabled/eks_cluster_kms_cmk_encryption_in_secrets_enabled.py @@ -6,7 +6,7 @@ class eks_cluster_kms_cmk_encryption_in_secrets_enabled(Check): def execute(self): findings = [] for cluster in eks_client.clusters: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cluster.region report.resource_id = cluster.name report.resource_arn = cluster.arn diff --git a/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.metadata.json b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.metadata.json index efac79bf..19b6d1ac 100644 --- a/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.metadata.json +++ b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "eks_control_plane_endpoint_access_restricted", - "CheckTitle": "Restrict Access to the EKS Control Plane Endpoint", - "CheckType": ["Infrastructure Security"], - "ServiceName": "eks", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsEksCluster", - "Description": "Restrict Access to the EKS Control Plane Endpoint", - "Risk": "By default; this API server endpoint is public to the internet; and access to the API server is secured using a combination of AWS Identity and Access Management (IAM) and native Kubernetes Role Based Access Control (RBAC).", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws eks update-cluster-config --region --name --resources-vpc-config endpointPublicAccess=false,endpointPrivateAccess=true,publicAccessCidrs=[\"123.123.123.123/32\"]", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "You should enable private access to the Kubernetes API server so that all communication between your nodes and the API server stays within your VPC. 
You can limit the IP addresses that can access your API server from the internet; or completely disable internet access to the API server.", - "Url": "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" - } + "Provider": "aws", + "CheckID": "eks_control_plane_endpoint_access_restricted", + "CheckTitle": "Restrict Access to the EKS Control Plane Endpoint", + "CheckType": [ + "Infrastructure Security" + ], + "ServiceName": "eks", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEksCluster", + "Description": "Restrict Access to the EKS Control Plane Endpoint", + "Risk": "By default; this API server endpoint is public to the internet; and access to the API server is secured using a combination of AWS Identity and Access Management (IAM) and native Kubernetes Role Based Access Control (RBAC).", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws eks update-cluster-config --region --name --resources-vpc-config endpointPublicAccess=false,endpointPrivateAccess=true,publicAccessCidrs=[\"123.123.123.123/32\"]", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "You should enable private access to the Kubernetes API server so that all communication between your nodes and the API server stays within your VPC. You can limit the IP addresses that can access your API server from the internet; or completely disable internet access to the API server.", + "Url": "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.py b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.py index 62dbf6f4..76e34a19 100644 --- a/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.py +++ b/providers/aws/services/eks/eks_control_plane_endpoint_access_restricted/eks_control_plane_endpoint_access_restricted.py @@ -6,7 +6,7 @@ class eks_control_plane_endpoint_access_restricted(Check): def execute(self): findings = [] for cluster in eks_client.clusters: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cluster.region report.resource_id = cluster.name report.resource_arn = cluster.arn diff --git a/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.metadata.json b/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.metadata.json index 2419bf6d..8323bfcd 100644 --- a/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.metadata.json +++ b/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "eks_control_plane_logging_all_types_enabled", - "CheckTitle": "Ensure EKS Control Plane Audit Logging is enabled for all log types", - "CheckType": 
["Logging and Monitoring"], - "ServiceName": "eks", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsEksCluster", - "Description": "Ensure EKS Control Plane Audit Logging is enabled for all log types", - "Risk": "If logs are not enabled; monitoring of service use and threat analysis is not possible.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws eks update-cluster-config --region --name --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'", - "NativeIaC": "", - "Other": "https://docs.bridgecrew.io/docs/bc_aws_kubernetes_4#aws-console", - "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_kubernetes_4#fix---buildtime" - }, - "Recommendation": { - "Text": "Make sure you logging for EKS control plane is enabled.", - "Url": "https://docs.aws.amazon.com/eks/latest/userguide/logging-monitoring.html" - } + "Provider": "aws", + "CheckID": "eks_control_plane_logging_all_types_enabled", + "CheckTitle": "Ensure EKS Control Plane Audit Logging is enabled for all log types", + "CheckType": [ + "Logging and Monitoring" + ], + "ServiceName": "eks", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsEksCluster", + "Description": "Ensure EKS Control Plane Audit Logging is enabled for all log types", + "Risk": "If logs are not enabled; monitoring of service use and threat analysis is not possible.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws eks update-cluster-config --region --name --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'", + "NativeIaC": "", + "Other": "https://docs.bridgecrew.io/docs/bc_aws_kubernetes_4#aws-console", + "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_kubernetes_4#fix---buildtime" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Make sure you logging for EKS control plane is enabled.", + "Url": "https://docs.aws.amazon.com/eks/latest/userguide/logging-monitoring.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.py b/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.py index a42fb248..55e68be3 100644 --- a/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.py +++ b/providers/aws/services/eks/eks_control_plane_logging_all_types_enabled/eks_control_plane_logging_all_types_enabled.py @@ -6,7 +6,7 @@ class eks_control_plane_logging_all_types_enabled(Check): def execute(self): findings = [] for cluster in eks_client.clusters: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cluster.region report.resource_id = cluster.name report.resource_arn = cluster.arn diff --git a/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.metadata.json 
b/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.metadata.json index ffd100d6..f5071470 100644 --- a/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.metadata.json +++ b/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "eks_endpoints_not_publicly_accessible", - "CheckTitle": "Ensure EKS Clusters are created with Private Endpoint Enabled and Public Access Disabled", - "CheckType": ["Protect", "Secure network configuration","Resources not publicly accessible"], - "ServiceName": "eks", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "high", - "ResourceType": "AwsEksCluster", - "Description": "Ensure EKS Clusters are created with Private Endpoint Enabled and Public Access Disabled", - "Risk": "Publicly accessible services could expose sensitive data to bad actors.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws eks update-cluster-config --region --name --resources-vpc-config endpointPublicAccess=false,endpointPrivateAccess=true,publicAccessCidrs=[\"123.123.123.123/32\"]", - "NativeIaC": "", - "Other": "https://github.com/cloudmatos/matos/tree/master/remediations/aws/eks/eks-disable-public-endpoint", - "Terraform": "" - }, - "Recommendation": { - "Text": "Enable private access to the Kubernetes API server so that all communication between your nodes and the API server stays within your VPC. Disable internet access to the API server.", - "Url": "https://docs.aws.amazon.com/eks/latest/userguide/infrastructure-security.html" - } + "Provider": "aws", + "CheckID": "eks_endpoints_not_publicly_accessible", + "CheckTitle": "Ensure EKS Clusters are created with Private Endpoint Enabled and Public Access Disabled", + "CheckType": [ + "Protect", + "Secure network configuration", + "Resources not publicly accessible" + ], + "ServiceName": "eks", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "high", + "ResourceType": "AwsEksCluster", + "Description": "Ensure EKS Clusters are created with Private Endpoint Enabled and Public Access Disabled", + "Risk": "Publicly accessible services could expose sensitive data to bad actors.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws eks update-cluster-config --region --name --resources-vpc-config endpointPublicAccess=false,endpointPrivateAccess=true,publicAccessCidrs=[\"123.123.123.123/32\"]", + "NativeIaC": "", + "Other": "https://github.com/cloudmatos/matos/tree/master/remediations/aws/eks/eks-disable-public-endpoint", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable private access to the Kubernetes API server so that all communication between your nodes and the API server stays within your VPC. 
Disable internet access to the API server.", + "Url": "https://docs.aws.amazon.com/eks/latest/userguide/infrastructure-security.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.py b/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.py index 475d8b3e..54d86bbc 100644 --- a/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.py +++ b/providers/aws/services/eks/eks_endpoints_not_publicly_accessible/eks_endpoints_not_publicly_accessible.py @@ -6,7 +6,7 @@ class eks_endpoints_not_publicly_accessible(Check): def execute(self): findings = [] for cluster in eks_client.clusters: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cluster.region report.resource_id = cluster.name report.resource_arn = cluster.arn diff --git a/providers/aws/services/elb/elb_insecure_ssl_ciphers/elb_insecure_ssl_ciphers.metadata.json b/providers/aws/services/elb/elb_insecure_ssl_ciphers/elb_insecure_ssl_ciphers.metadata.json index 1cc0f3b1..566053a0 100644 --- a/providers/aws/services/elb/elb_insecure_ssl_ciphers/elb_insecure_ssl_ciphers.metadata.json +++ b/providers/aws/services/elb/elb_insecure_ssl_ciphers/elb_insecure_ssl_ciphers.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elb_insecure_ssl_ciphers", - "CheckTitle": "Check if Elastic Load Balancers have insecure SSL ciphers.", - "CheckType": ["Data Protection"], - "ServiceName": "elb", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElbLoadBalancer", - "Description": "Check if Elastic Load Balancers have insecure SSL ciphers.", - "Risk": "Using insecure ciphers could affect privacy of in transit information.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws elb set-load-balancer-policies-of-listener --load-balancer-name --load-balancer-port 443 --policy-names ELBSecurityPolicy-TLS-1-2-2017-01", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELB/elb-security-policy.html", - "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_general_43#terraform" - }, - "Recommendation": { - "Text": "Use a Security policy with a ciphers that are stronger as possible. 
Drop legacy and unsecure ciphers.", - "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies" - } + "Provider": "aws", + "CheckID": "elb_insecure_ssl_ciphers", + "CheckTitle": "Check if Elastic Load Balancers have insecure SSL ciphers.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "elb", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElbLoadBalancer", + "Description": "Check if Elastic Load Balancers have insecure SSL ciphers.", + "Risk": "Using insecure ciphers could affect privacy of in transit information.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws elb set-load-balancer-policies-of-listener --load-balancer-name --load-balancer-port 443 --policy-names ELBSecurityPolicy-TLS-1-2-2017-01", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELB/elb-security-policy.html", + "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_general_43#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Use a security policy with ciphers that are as strong as possible. Drop legacy and insecure ciphers.", + "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elb/elb_insecure_ssl_ciphers/elb_insecure_ssl_ciphers.py b/providers/aws/services/elb/elb_insecure_ssl_ciphers/elb_insecure_ssl_ciphers.py index b359412a..5a27cb06 100644 --- a/providers/aws/services/elb/elb_insecure_ssl_ciphers/elb_insecure_ssl_ciphers.py +++ b/providers/aws/services/elb/elb_insecure_ssl_ciphers/elb_insecure_ssl_ciphers.py @@ -9,7 +9,7 @@ class elb_insecure_ssl_ciphers(Check): "ELBSecurityPolicy-TLS-1-2-2017-01", ] for lb in elb_client.loadbalancers: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.status = "PASS" diff --git a/providers/aws/services/elb/elb_internet_facing/elb_internet_facing.metadata.json b/providers/aws/services/elb/elb_internet_facing/elb_internet_facing.metadata.json index 50d9de4a..17ccf5c7 100644 --- a/providers/aws/services/elb/elb_internet_facing/elb_internet_facing.metadata.json +++ b/providers/aws/services/elb/elb_internet_facing/elb_internet_facing.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elb_internet_facing", - "CheckTitle": "Check for internet facing Elastic Load Balancers.", - "CheckType": ["Data Protection"], - "ServiceName": "elb", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElbLoadBalancer", - "Description": "Check for internet facing Elastic Load Balancers.", - "Risk": "Publicly accessible load balancers could expose sensitive data to bad actors.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELB/internet-facing-load-balancers.html", - "Terraform": "" - }, - "Recommendation": { - "Text": "Ensure the load balancer should 
be publicly accessible. If publicly exposed ensure a WAF ACL is implemented.", - "Url": "https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-associating-aws-resource.html" - } + "Provider": "aws", + "CheckID": "elb_internet_facing", + "CheckTitle": "Check for internet facing Elastic Load Balancers.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "elb", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElbLoadBalancer", + "Description": "Check for internet facing Elastic Load Balancers.", + "Risk": "Publicly accessible load balancers could expose sensitive data to bad actors.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELB/internet-facing-load-balancers.html", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Check whether the load balancer needs to be publicly accessible. If it is publicly exposed, ensure a WAF ACL is implemented.", + "Url": "https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-associating-aws-resource.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elb/elb_internet_facing/elb_internet_facing.py b/providers/aws/services/elb/elb_internet_facing/elb_internet_facing.py index e0fe0cf5..94b06c23 100644 --- a/providers/aws/services/elb/elb_internet_facing/elb_internet_facing.py +++ b/providers/aws/services/elb/elb_internet_facing/elb_internet_facing.py @@ -6,7 +6,7 @@ class elb_internet_facing(Check): def execute(self): findings = [] for lb in elb_client.loadbalancers: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.status = "PASS" diff --git a/providers/aws/services/elb/elb_logging_enabled/elb_logging_enabled.metadata.json b/providers/aws/services/elb/elb_logging_enabled/elb_logging_enabled.metadata.json index e8dd1630..f5836c33 100644 --- a/providers/aws/services/elb/elb_logging_enabled/elb_logging_enabled.metadata.json +++ b/providers/aws/services/elb/elb_logging_enabled/elb_logging_enabled.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elb_logging_enabled", - "CheckTitle": "Check if Elastic Load Balancers have logging enabled.", - "CheckType": ["Logging and Monitoring"], - "ServiceName": "elb", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElbLoadBalancer", - "Description": "Check if Elastic Load Balancers have logging enabled.", - "Risk": "If logs are not enabled monitoring of service use and threat analysis is not possible.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws elb modify-load-balancer-attributes --load-balancer-name --load-balancer-attributes '{AccessLog:{Enabled:true,EmitInterval:60,S3BucketName:}}'", - "NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_logging_23#cloudformation", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELB/elb-access-log.html", - "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_logging_23#terraform" - }, - "Recommendation": { - "Text": "Enable ELB 
logging, create la log lifecycle and define use cases.", - "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html" - } + "Provider": "aws", + "CheckID": "elb_logging_enabled", + "CheckTitle": "Check if Elastic Load Balancers have logging enabled.", + "CheckType": [ + "Logging and Monitoring" + ], + "ServiceName": "elb", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElbLoadBalancer", + "Description": "Check if Elastic Load Balancers have logging enabled.", + "Risk": "If logs are not enabled monitoring of service use and threat analysis is not possible.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws elb modify-load-balancer-attributes --load-balancer-name --load-balancer-attributes '{AccessLog:{Enabled:true,EmitInterval:60,S3BucketName:}}'", + "NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_logging_23#cloudformation", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELB/elb-access-log.html", + "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_logging_23#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable ELB logging, create a log lifecycle and define use cases.", + "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elb/elb_logging_enabled/elb_logging_enabled.py b/providers/aws/services/elb/elb_logging_enabled/elb_logging_enabled.py index 27f72bd9..9bd698ee 100644 --- a/providers/aws/services/elb/elb_logging_enabled/elb_logging_enabled.py +++ b/providers/aws/services/elb/elb_logging_enabled/elb_logging_enabled.py @@ -6,7 +6,7 @@ class elb_logging_enabled(Check): def execute(self): findings = [] for lb in elb_client.loadbalancers: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.status = "FAIL" diff --git a/providers/aws/services/elb/elb_ssl_listeners/elb_ssl_listeners.metadata.json b/providers/aws/services/elb/elb_ssl_listeners/elb_ssl_listeners.metadata.json index aec1630e..dd210e4e 100644 --- a/providers/aws/services/elb/elb_ssl_listeners/elb_ssl_listeners.metadata.json +++ b/providers/aws/services/elb/elb_ssl_listeners/elb_ssl_listeners.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elb_ssl_listeners", - "CheckTitle": "Check if Elastic Load Balancers have SSL listeners.", - "CheckType": ["Data Protection"], - "ServiceName": "elb", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElbLoadBalancer", - "Description": "Check if Elastic Load Balancers have SSL listeners.", - "Risk": "Clear text communication could affect privacy of information in transit.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws elb create-load-balancer-listeners --load-balancer-name --listeners Protocol=HTTPS, LoadBalancerPort=443, InstanceProtocol=HTTP, InstancePort=80, SSLCertificateId=", - "NativeIaC": "", - "Other": 
"https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELB/elb-listener-security.html", - "Terraform": "" - }, - "Recommendation": { - "Text": "Scan for Load Balancers with HTTP or TCP listeners and understand the reason for each of them. Check if the listener can be implemented as TLS instead..", - "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html" - } + "Provider": "aws", + "CheckID": "elb_ssl_listeners", + "CheckTitle": "Check if Elastic Load Balancers have SSL listeners.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "elb", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElbLoadBalancer", + "Description": "Check if Elastic Load Balancers have SSL listeners.", + "Risk": "Clear text communication could affect privacy of information in transit.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws elb create-load-balancer-listeners --load-balancer-name --listeners Protocol=HTTPS, LoadBalancerPort=443, InstanceProtocol=HTTP, InstancePort=80, SSLCertificateId=", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELB/elb-listener-security.html", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Scan for Load Balancers with HTTP or TCP listeners and understand the reason for each of them. Check if the listener can be implemented as TLS instead..", + "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elb/elb_ssl_listeners/elb_ssl_listeners.py b/providers/aws/services/elb/elb_ssl_listeners/elb_ssl_listeners.py index f6a72d09..6db30978 100644 --- a/providers/aws/services/elb/elb_ssl_listeners/elb_ssl_listeners.py +++ b/providers/aws/services/elb/elb_ssl_listeners/elb_ssl_listeners.py @@ -7,7 +7,7 @@ class elb_ssl_listeners(Check): findings = [] secure_protocols = ["SSL", "HTTPS"] for lb in elb_client.loadbalancers: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.status = "PASS" diff --git a/providers/aws/services/elbv2/elbv2_deletion_protection/elbv2_deletion_protection.metadata.json b/providers/aws/services/elbv2/elbv2_deletion_protection/elbv2_deletion_protection.metadata.json index edcb0e95..a8266aeb 100644 --- a/providers/aws/services/elbv2/elbv2_deletion_protection/elbv2_deletion_protection.metadata.json +++ b/providers/aws/services/elbv2/elbv2_deletion_protection/elbv2_deletion_protection.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elbv2_deletion_protection", - "CheckTitle": "Check if Elastic Load Balancers have deletion protection enabled.", - "CheckType": ["Data Protection"], - "ServiceName": "elbv2", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", - "Description": "Check if Elastic Load Balancers have deletion protection enabled.", - "Risk": "If deletion protection is not enabled, the resource is not protected 
against deletion.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws elbv2 modify-load-balancer-attributes --load-balancer-arn --attributes Key=deletion_protection.enabled,Value=true", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELBv2/deletion-protection.html", - "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_networking_62#terraform" - }, - "Recommendation": { - "Text": "Enable deletion protection attribute, this is not enabled by default.", - "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#deletion-protection" - } + "Provider": "aws", + "CheckID": "elbv2_deletion_protection", + "CheckTitle": "Check if Elastic Load Balancers have deletion protection enabled.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "elbv2", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", + "Description": "Check if Elastic Load Balancers have deletion protection enabled.", + "Risk": "If deletion protection is not enabled, the resource is not protected against deletion.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws elbv2 modify-load-balancer-attributes --load-balancer-arn --attributes Key=deletion_protection.enabled,Value=true", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELBv2/deletion-protection.html", + "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_networking_62#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable deletion protection attribute, this is not enabled by default.", + "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#deletion-protection" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elbv2/elbv2_deletion_protection/elbv2_deletion_protection.py b/providers/aws/services/elbv2/elbv2_deletion_protection/elbv2_deletion_protection.py index 223fac05..9d16c649 100644 --- a/providers/aws/services/elbv2/elbv2_deletion_protection/elbv2_deletion_protection.py +++ b/providers/aws/services/elbv2/elbv2_deletion_protection/elbv2_deletion_protection.py @@ -6,7 +6,7 @@ class elbv2_deletion_protection(Check): def execute(self): findings = [] for lb in elbv2_client.loadbalancersv2: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.resource_arn = lb.arn diff --git a/providers/aws/services/elbv2/elbv2_desync_mitigation_mode/elbv2_desync_mitigation_mode.metadata.json b/providers/aws/services/elbv2/elbv2_desync_mitigation_mode/elbv2_desync_mitigation_mode.metadata.json index ecc5fe07..ef820524 100644 --- a/providers/aws/services/elbv2/elbv2_desync_mitigation_mode/elbv2_desync_mitigation_mode.metadata.json +++ b/providers/aws/services/elbv2/elbv2_desync_mitigation_mode/elbv2_desync_mitigation_mode.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elbv2_desync_mitigation_mode", - "CheckTitle": "Check whether the Application Load Balancer is configured with defensive or strictest desync mitigation mode.", - "CheckType": 
["Data Protection"], - "ServiceName": "elbv2", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", - "Description": "Check whether the Application Load Balancer is configured with defensive or strictest desync mitigation mode.", - "Risk": "HTTP Desync issues can lead to request smuggling and make your applications vulnerable to request queue or cache poisoning; which could lead to credential hijacking or execution of unauthorized commands.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws elbv2 modify-load-balancer-attributes --load-balancer-arn --attributes Key=routing.http.desync_mitigation_mode,Value=", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Ensure Application Load Balancer is configured with defensive or strictest desync mitigation mode.", - "Url": "https://aws.amazon.com/about-aws/whats-new/2020/08/application-and-classic-load-balancers-adding-defense-in-depth-with-introduction-of-desync-mitigation-mode/" - } + "Provider": "aws", + "CheckID": "elbv2_desync_mitigation_mode", + "CheckTitle": "Check whether the Application Load Balancer is configured with defensive or strictest desync mitigation mode.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "elbv2", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", + "Description": "Check whether the Application Load Balancer is configured with defensive or strictest desync mitigation mode.", + "Risk": "HTTP Desync issues can lead to request smuggling and make your applications vulnerable to request queue or cache poisoning; which could lead to credential hijacking or execution of unauthorized commands.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws elbv2 modify-load-balancer-attributes --load-balancer-arn --attributes Key=routing.http.desync_mitigation_mode,Value=", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Ensure Application Load Balancer is configured with defensive or strictest desync mitigation mode.", + "Url": "https://aws.amazon.com/about-aws/whats-new/2020/08/application-and-classic-load-balancers-adding-defense-in-depth-with-introduction-of-desync-mitigation-mode/" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elbv2/elbv2_desync_mitigation_mode/elbv2_desync_mitigation_mode.py b/providers/aws/services/elbv2/elbv2_desync_mitigation_mode/elbv2_desync_mitigation_mode.py index ab0f0fd3..77878e81 100644 --- a/providers/aws/services/elbv2/elbv2_desync_mitigation_mode/elbv2_desync_mitigation_mode.py +++ b/providers/aws/services/elbv2/elbv2_desync_mitigation_mode/elbv2_desync_mitigation_mode.py @@ -7,7 +7,7 @@ class elbv2_desync_mitigation_mode(Check): findings = [] for lb in elbv2_client.loadbalancersv2: if lb.type == "application": - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.resource_arn = lb.arn diff --git 
a/providers/aws/services/elbv2/elbv2_insecure_ssl_ciphers/elbv2_insecure_ssl_ciphers.metadata.json b/providers/aws/services/elbv2/elbv2_insecure_ssl_ciphers/elbv2_insecure_ssl_ciphers.metadata.json index cdd968cd..50c8daa5 100644 --- a/providers/aws/services/elbv2/elbv2_insecure_ssl_ciphers/elbv2_insecure_ssl_ciphers.metadata.json +++ b/providers/aws/services/elbv2/elbv2_insecure_ssl_ciphers/elbv2_insecure_ssl_ciphers.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elbv2_insecure_ssl_ciphers", - "CheckTitle": "Check if Elastic Load Balancers have insecure SSL ciphers.", - "CheckType": ["Data Protection"], - "ServiceName": "elbv2", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", - "Description": "Check if Elastic Load Balancers have insecure SSL ciphers.", - "Risk": "Using insecure ciphers could affect privacy of in transit information.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws elbv2 modify-listener --listener-arn --ssl-policy ELBSecurityPolicy-TLS13-1-2-Ext2-2021-06", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELBv2/security-policy.html", - "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_general_43#terraform" - }, - "Recommendation": { - "Text": "Use a Security policy with a ciphers that are stronger as possible. Drop legacy and unsecure ciphers.", - "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies" - } + "Provider": "aws", + "CheckID": "elbv2_insecure_ssl_ciphers", + "CheckTitle": "Check if Elastic Load Balancers have insecure SSL ciphers.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "elbv2", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", + "Description": "Check if Elastic Load Balancers have insecure SSL ciphers.", + "Risk": "Using insecure ciphers could affect privacy of in transit information.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws elbv2 modify-listener --listener-arn --ssl-policy ELBSecurityPolicy-TLS13-1-2-Ext2-2021-06", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELBv2/security-policy.html", + "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_general_43#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Use a Security policy with a ciphers that are stronger as possible. 
Drop legacy and unsecure ciphers.", + "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elbv2/elbv2_insecure_ssl_ciphers/elbv2_insecure_ssl_ciphers.py b/providers/aws/services/elbv2/elbv2_insecure_ssl_ciphers/elbv2_insecure_ssl_ciphers.py index 86e1a390..fcd47cab 100644 --- a/providers/aws/services/elbv2/elbv2_insecure_ssl_ciphers/elbv2_insecure_ssl_ciphers.py +++ b/providers/aws/services/elbv2/elbv2_insecure_ssl_ciphers/elbv2_insecure_ssl_ciphers.py @@ -18,7 +18,7 @@ class elbv2_insecure_ssl_ciphers(Check): "ELBSecurityPolicy-TLS13-1-2-Ext2-2021-06", ] for lb in elbv2_client.loadbalancersv2: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.resource_arn = lb.arn diff --git a/providers/aws/services/elbv2/elbv2_internet_facing/elbv2_internet_facing.metadata.json b/providers/aws/services/elbv2/elbv2_internet_facing/elbv2_internet_facing.metadata.json index 22d67111..67792640 100644 --- a/providers/aws/services/elbv2/elbv2_internet_facing/elbv2_internet_facing.metadata.json +++ b/providers/aws/services/elbv2/elbv2_internet_facing/elbv2_internet_facing.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elbv2_internet_facing", - "CheckTitle": "Check for internet facing Elastic Load Balancers.", - "CheckType": ["Data Protection"], - "ServiceName": "elbv2", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", - "Description": "Check for internet facing Elastic Load Balancers.", - "Risk": "Publicly accessible load balancers could expose sensitive data to bad actors.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELBv2/internet-facing-load-balancers.html", - "Terraform": "" - }, - "Recommendation": { - "Text": "Ensure the load balancer should be publicly accessible. If publicly exposed ensure a WAF ACL is implemented.", - "Url": "https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-associating-aws-resource.html" - } + "Provider": "aws", + "CheckID": "elbv2_internet_facing", + "CheckTitle": "Check for internet facing Elastic Load Balancers.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "elbv2", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", + "Description": "Check for internet facing Elastic Load Balancers.", + "Risk": "Publicly accessible load balancers could expose sensitive data to bad actors.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELBv2/internet-facing-load-balancers.html", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Ensure the load balancer should be publicly accessible. 
If publicly exposed ensure a WAF ACL is implemented.", + "Url": "https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-associating-aws-resource.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elbv2/elbv2_internet_facing/elbv2_internet_facing.py b/providers/aws/services/elbv2/elbv2_internet_facing/elbv2_internet_facing.py index 1e0fc3a1..807116b1 100644 --- a/providers/aws/services/elbv2/elbv2_internet_facing/elbv2_internet_facing.py +++ b/providers/aws/services/elbv2/elbv2_internet_facing/elbv2_internet_facing.py @@ -6,7 +6,7 @@ class elbv2_internet_facing(Check): def execute(self): findings = [] for lb in elbv2_client.loadbalancersv2: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.resource_arn = lb.arn diff --git a/providers/aws/services/elbv2/elbv2_listeners_underneath/elbv2_listeners_underneath.metadata.json b/providers/aws/services/elbv2/elbv2_listeners_underneath/elbv2_listeners_underneath.metadata.json index 8f870111..10d8a30a 100644 --- a/providers/aws/services/elbv2/elbv2_listeners_underneath/elbv2_listeners_underneath.metadata.json +++ b/providers/aws/services/elbv2/elbv2_listeners_underneath/elbv2_listeners_underneath.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elbv2_listeners_underneath", - "CheckTitle": "Check if ELBV2 has listeners underneath.", - "CheckType": ["Data Protection"], - "ServiceName": "elbv2", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", - "Description": "Check if ELBV2 has listeners underneath.", - "Risk": "The rules that are defined for a listener determine how the load balancer routes requests to its registered targets.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Add listeners to Elastic Load Balancers V2.", - "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html" - } + "Provider": "aws", + "CheckID": "elbv2_listeners_underneath", + "CheckTitle": "Check if ELBV2 has listeners underneath.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "elbv2", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", + "Description": "Check if ELBV2 has listeners underneath.", + "Risk": "The rules that are defined for a listener determine how the load balancer routes requests to its registered targets.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Add listeners to Elastic Load Balancers V2.", + "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git 
a/providers/aws/services/elbv2/elbv2_listeners_underneath/elbv2_listeners_underneath.py b/providers/aws/services/elbv2/elbv2_listeners_underneath/elbv2_listeners_underneath.py index be59e687..e753fac5 100644 --- a/providers/aws/services/elbv2/elbv2_listeners_underneath/elbv2_listeners_underneath.py +++ b/providers/aws/services/elbv2/elbv2_listeners_underneath/elbv2_listeners_underneath.py @@ -6,7 +6,7 @@ class elbv2_listeners_underneath(Check): def execute(self): findings = [] for lb in elbv2_client.loadbalancersv2: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.resource_arn = lb.arn diff --git a/providers/aws/services/elbv2/elbv2_logging_enabled/elbv2_logging_enabled.metadata.json b/providers/aws/services/elbv2/elbv2_logging_enabled/elbv2_logging_enabled.metadata.json index 20ff6fcc..48d73dd0 100644 --- a/providers/aws/services/elbv2/elbv2_logging_enabled/elbv2_logging_enabled.metadata.json +++ b/providers/aws/services/elbv2/elbv2_logging_enabled/elbv2_logging_enabled.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elbv2_logging_enabled", - "CheckTitle": "Check if Elastic Load Balancers have logging enabled.", - "CheckType": ["Logging and Monitoring"], - "ServiceName": "elbv2", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", - "Description": "Check if Elastic Load Balancers have logging enabled.", - "Risk": "If logs are not enabled monitoring of service use and threat analysis is not possible.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws elbv2 modify-load-balancer-attributes --load-balancer-arn --attributes Key=access_logs.s3.enabled,Value=true Key=access_logs.s3.bucket,Value= Key=access_logs.s3.prefix,Value=", - "NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_logging_22#cloudformation", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELBv2/access-log.html", - "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_logging_22#terraform" - }, - "Recommendation": { - "Text": "Enable ELB logging, create la log lifecycle and define use cases.", - "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html" - } + "Provider": "aws", + "CheckID": "elbv2_logging_enabled", + "CheckTitle": "Check if Elastic Load Balancers have logging enabled.", + "CheckType": [ + "Logging and Monitoring" + ], + "ServiceName": "elbv2", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", + "Description": "Check if Elastic Load Balancers have logging enabled.", + "Risk": "If logs are not enabled monitoring of service use and threat analysis is not possible.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws elbv2 modify-load-balancer-attributes --load-balancer-arn --attributes Key=access_logs.s3.enabled,Value=true Key=access_logs.s3.bucket,Value= Key=access_logs.s3.prefix,Value=", + "NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_logging_22#cloudformation", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELBv2/access-log.html", + "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_logging_22#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - 
"DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable ELB logging, create la log lifecycle and define use cases.", + "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elbv2/elbv2_logging_enabled/elbv2_logging_enabled.py b/providers/aws/services/elbv2/elbv2_logging_enabled/elbv2_logging_enabled.py index 634e62f7..4dd0abfa 100644 --- a/providers/aws/services/elbv2/elbv2_logging_enabled/elbv2_logging_enabled.py +++ b/providers/aws/services/elbv2/elbv2_logging_enabled/elbv2_logging_enabled.py @@ -6,7 +6,7 @@ class elbv2_logging_enabled(Check): def execute(self): findings = [] for lb in elbv2_client.loadbalancersv2: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.resource_arn = lb.arn diff --git a/providers/aws/services/elbv2/elbv2_request_smugling/elbv2_request_smugling.metadata.json b/providers/aws/services/elbv2/elbv2_request_smugling/elbv2_request_smugling.metadata.json index f9ee18c2..6e9cb17d 100644 --- a/providers/aws/services/elbv2/elbv2_request_smugling/elbv2_request_smugling.metadata.json +++ b/providers/aws/services/elbv2/elbv2_request_smugling/elbv2_request_smugling.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elbv2_request_smugling", - "CheckTitle": "Check if Application Load Balancer is dropping invalid packets to prevent header based HTTP request smuggling.", - "CheckType": ["Data Protection"], - "ServiceName": "elbv2", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", - "Description": "Check if Application Load Balancer is dropping invalid packets to prevent header based HTTP request smuggling.", - "Risk": "ALB can be target of actors sending bad HTTP headers.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws elbv2 modify-load-balancer-attributes --load-balancer-arn --attributes Key=routing.http.drop_invalid_header_fields.enabled,Value=true", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELBv2/drop-invalid-header-fields-enabled.html", - "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-alb-drops-http-headers#terraform" - }, - "Recommendation": { - "Text": "Ensure Application Load Balancer is configured for HTTP headers with header fields that are not valid are removed by the load balancer (true).", - "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#deletion-protection" - } + "Provider": "aws", + "CheckID": "elbv2_request_smugling", + "CheckTitle": "Check if Application Load Balancer is dropping invalid packets to prevent header based HTTP request smuggling.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "elbv2", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", + "Description": "Check if Application Load Balancer is dropping invalid packets to prevent header based HTTP request smuggling.", + "Risk": "ALB can be target of actors sending bad HTTP headers.", + 
"RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws elbv2 modify-load-balancer-attributes --load-balancer-arn --attributes Key=routing.http.drop_invalid_header_fields.enabled,Value=true", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ELBv2/drop-invalid-header-fields-enabled.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-alb-drops-http-headers#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Ensure Application Load Balancer is configured for HTTP headers with header fields that are not valid are removed by the load balancer (true).", + "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#deletion-protection" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elbv2/elbv2_request_smugling/elbv2_request_smugling.py b/providers/aws/services/elbv2/elbv2_request_smugling/elbv2_request_smugling.py index a079e9ca..75e2b8e8 100644 --- a/providers/aws/services/elbv2/elbv2_request_smugling/elbv2_request_smugling.py +++ b/providers/aws/services/elbv2/elbv2_request_smugling/elbv2_request_smugling.py @@ -7,7 +7,7 @@ class elbv2_request_smugling(Check): findings = [] for lb in elbv2_client.loadbalancersv2: if lb.type == "application": - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.resource_arn = lb.arn diff --git a/providers/aws/services/elbv2/elbv2_ssl_listeners/elbv2_ssl_listeners.metadata.json b/providers/aws/services/elbv2/elbv2_ssl_listeners/elbv2_ssl_listeners.metadata.json index 6afcbd7b..567cde41 100644 --- a/providers/aws/services/elbv2/elbv2_ssl_listeners/elbv2_ssl_listeners.metadata.json +++ b/providers/aws/services/elbv2/elbv2_ssl_listeners/elbv2_ssl_listeners.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elbv2_ssl_listeners", - "CheckTitle": "Check if Elastic Load Balancers have SSL listeners.", - "CheckType": ["Data Protection"], - "ServiceName": "elbv2", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", - "Description": "Check if Elastic Load Balancers have SSL listeners.", - "Risk": "Clear text communication could affect privacy of information in transit.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "aws elbv2 create-listener --load-balancer-arn --protocol HTTPS --port 443 --ssl-policy --certificates CertificateArn=,IsDefault=true", - "NativeIaC": "", - "Other": "https://docs.bridgecrew.io/docs/networking_36#aws-ec2-console", - "Terraform": "" - }, - "Recommendation": { - "Text": "Scan for Load Balancers with HTTP or TCP listeners and understand the reason for each of them. 
Check if the listener can be implemented as TLS instead.", - "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html" - } + "Provider": "aws", + "CheckID": "elbv2_ssl_listeners", + "CheckTitle": "Check if Elastic Load Balancers have SSL listeners.", + "CheckType": [ + "Data Protection" + ], + "ServiceName": "elbv2", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", + "Description": "Check if Elastic Load Balancers have SSL listeners.", + "Risk": "Clear text communication could affect privacy of information in transit.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "aws elbv2 create-listener --load-balancer-arn --protocol HTTPS --port 443 --ssl-policy --certificates CertificateArn=,IsDefault=true", + "NativeIaC": "", + "Other": "https://docs.bridgecrew.io/docs/networking_36#aws-ec2-console", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Scan for Load Balancers with HTTP or TCP listeners and understand the reason for each of them. Check if the listener can be implemented as TLS instead.", + "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elbv2/elbv2_ssl_listeners/elbv2_ssl_listeners.py b/providers/aws/services/elbv2/elbv2_ssl_listeners/elbv2_ssl_listeners.py index 560ddc28..4b8b83fb 100644 --- a/providers/aws/services/elbv2/elbv2_ssl_listeners/elbv2_ssl_listeners.py +++ b/providers/aws/services/elbv2/elbv2_ssl_listeners/elbv2_ssl_listeners.py @@ -7,7 +7,7 @@ class elbv2_ssl_listeners(Check): findings = [] for lb in elbv2_client.loadbalancersv2: if lb.type == "application": - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.resource_arn = lb.arn diff --git a/providers/aws/services/elbv2/elbv2_waf_acl_attached/elbv2_waf_acl_attached.metadata.json b/providers/aws/services/elbv2/elbv2_waf_acl_attached/elbv2_waf_acl_attached.metadata.json index acefe3e8..7895551d 100644 --- a/providers/aws/services/elbv2/elbv2_waf_acl_attached/elbv2_waf_acl_attached.metadata.json +++ b/providers/aws/services/elbv2/elbv2_waf_acl_attached/elbv2_waf_acl_attached.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "elbv2_waf_acl_attached", - "CheckTitle": "Check if Application Load Balancer has a WAF ACL attached.", - "CheckType": ["Infrastructure Security"], - "ServiceName": "elbv2", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", - "Description": "Check if Application Load Balancer has a WAF ACL attached.", - "Risk": "If not WAF ACL is attached risk of web attacks increases.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Using the AWS Management Console open the AWS WAF console to attach an ACL.", - "Url": 
"https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-associating-aws-resource.html" - } + "Provider": "aws", + "CheckID": "elbv2_waf_acl_attached", + "CheckTitle": "Check if Application Load Balancer has a WAF ACL attached.", + "CheckType": [ + "Infrastructure Security" + ], + "ServiceName": "elbv2", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsElasticLoadBalancingV2LoadBalancer", + "Description": "Check if Application Load Balancer has a WAF ACL attached.", + "Risk": "If not WAF ACL is attached risk of web attacks increases.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Using the AWS Management Console open the AWS WAF console to attach an ACL.", + "Url": "https://docs.aws.amazon.com/waf/latest/developerguide/web-acl-associating-aws-resource.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/elbv2/elbv2_waf_acl_attached/elbv2_waf_acl_attached.py b/providers/aws/services/elbv2/elbv2_waf_acl_attached/elbv2_waf_acl_attached.py index 80205bca..b596f1d4 100644 --- a/providers/aws/services/elbv2/elbv2_waf_acl_attached/elbv2_waf_acl_attached.py +++ b/providers/aws/services/elbv2/elbv2_waf_acl_attached/elbv2_waf_acl_attached.py @@ -9,7 +9,7 @@ class elbv2_waf_acl_attached(Check): findings = [] for lb in elbv2_client.loadbalancersv2: if lb.type == "application": - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = lb.region report.resource_id = lb.name report.resource_arn = lb.arn diff --git a/providers/aws/services/emr/emr_cluster_account_public_block_enabled/emr_cluster_account_public_block_enabled.metadata.json b/providers/aws/services/emr/emr_cluster_account_public_block_enabled/emr_cluster_account_public_block_enabled.metadata.json index 7fa53092..09dfda81 100644 --- a/providers/aws/services/emr/emr_cluster_account_public_block_enabled/emr_cluster_account_public_block_enabled.metadata.json +++ b/providers/aws/services/emr/emr_cluster_account_public_block_enabled/emr_cluster_account_public_block_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/emr/emr_cluster_account_public_block_enabled/emr_cluster_account_public_block_enabled.py b/providers/aws/services/emr/emr_cluster_account_public_block_enabled/emr_cluster_account_public_block_enabled.py index 855c762e..01a216d2 100644 --- a/providers/aws/services/emr/emr_cluster_account_public_block_enabled/emr_cluster_account_public_block_enabled.py +++ b/providers/aws/services/emr/emr_cluster_account_public_block_enabled/emr_cluster_account_public_block_enabled.py @@ -6,7 +6,7 @@ class emr_cluster_account_public_block_enabled(Check): def execute(self): findings = [] for region in emr_client.block_public_access_configuration: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = region report.resource_id = emr_client.audited_account diff --git 
a/providers/aws/services/emr/emr_cluster_master_nodes_no_public_ip/emr_cluster_master_nodes_no_public_ip.metadata.json b/providers/aws/services/emr/emr_cluster_master_nodes_no_public_ip/emr_cluster_master_nodes_no_public_ip.metadata.json index 4501e565..1abfd530 100644 --- a/providers/aws/services/emr/emr_cluster_master_nodes_no_public_ip/emr_cluster_master_nodes_no_public_ip.metadata.json +++ b/providers/aws/services/emr/emr_cluster_master_nodes_no_public_ip/emr_cluster_master_nodes_no_public_ip.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/emr/emr_cluster_master_nodes_no_public_ip/emr_cluster_master_nodes_no_public_ip.py b/providers/aws/services/emr/emr_cluster_master_nodes_no_public_ip/emr_cluster_master_nodes_no_public_ip.py index 62a51aaa..b4df38c0 100644 --- a/providers/aws/services/emr/emr_cluster_master_nodes_no_public_ip/emr_cluster_master_nodes_no_public_ip.py +++ b/providers/aws/services/emr/emr_cluster_master_nodes_no_public_ip/emr_cluster_master_nodes_no_public_ip.py @@ -11,7 +11,7 @@ class emr_cluster_master_nodes_no_public_ip(Check): ClusterStatus.TERMINATED, ClusterStatus.TERMINATED_WITH_ERRORS, ): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cluster.region report.resource_id = cluster.id report.resource_arn = cluster.arn diff --git a/providers/aws/services/emr/emr_cluster_publicly_accesible/emr_cluster_publicly_accesible.metadata.json b/providers/aws/services/emr/emr_cluster_publicly_accesible/emr_cluster_publicly_accesible.metadata.json index c0171564..3997649b 100644 --- a/providers/aws/services/emr/emr_cluster_publicly_accesible/emr_cluster_publicly_accesible.metadata.json +++ b/providers/aws/services/emr/emr_cluster_publicly_accesible/emr_cluster_publicly_accesible.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/emr/emr_cluster_publicly_accesible/emr_cluster_publicly_accesible.py b/providers/aws/services/emr/emr_cluster_publicly_accesible/emr_cluster_publicly_accesible.py index 72b028f5..a2673541 100644 --- a/providers/aws/services/emr/emr_cluster_publicly_accesible/emr_cluster_publicly_accesible.py +++ b/providers/aws/services/emr/emr_cluster_publicly_accesible/emr_cluster_publicly_accesible.py @@ -15,7 +15,7 @@ class emr_cluster_publicly_accesible(Check): ClusterStatus.TERMINATED, ClusterStatus.TERMINATED_WITH_ERRORS, ): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cluster.region report.resource_id = cluster.id report.resource_arn = cluster.arn diff --git a/providers/aws/services/glacier/glacier_vaults_policy_public_access/glacier_vaults_policy_public_access.metadata.json b/providers/aws/services/glacier/glacier_vaults_policy_public_access/glacier_vaults_policy_public_access.metadata.json index ac8888ce..c313fbe5 100644 --- a/providers/aws/services/glacier/glacier_vaults_policy_public_access/glacier_vaults_policy_public_access.metadata.json +++ b/providers/aws/services/glacier/glacier_vaults_policy_public_access/glacier_vaults_policy_public_access.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/glacier/glacier_vaults_policy_public_access/glacier_vaults_policy_public_access.py 
b/providers/aws/services/glacier/glacier_vaults_policy_public_access/glacier_vaults_policy_public_access.py index cd7987c2..62d67f79 100644 --- a/providers/aws/services/glacier/glacier_vaults_policy_public_access/glacier_vaults_policy_public_access.py +++ b/providers/aws/services/glacier/glacier_vaults_policy_public_access/glacier_vaults_policy_public_access.py @@ -8,7 +8,7 @@ class glacier_vaults_policy_public_access(Check): def execute(self): findings = [] for vault in glacier_client.vaults.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = vault.region report.resource_id = vault.name report.resource_arn = vault.arn diff --git a/providers/aws/services/glue/glue_data_catalogs_connection_passwords_encryption_enabled/glue_data_catalogs_connection_passwords_encryption_enabled.metadata.json b/providers/aws/services/glue/glue_data_catalogs_connection_passwords_encryption_enabled/glue_data_catalogs_connection_passwords_encryption_enabled.metadata.json index d1b63664..c4fb2318 100644 --- a/providers/aws/services/glue/glue_data_catalogs_connection_passwords_encryption_enabled/glue_data_catalogs_connection_passwords_encryption_enabled.metadata.json +++ b/providers/aws/services/glue/glue_data_catalogs_connection_passwords_encryption_enabled/glue_data_catalogs_connection_passwords_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "glue_data_catalogs_connection_passwords_encryption_enabled", "CheckTitle": "Check if Glue data catalog settings have encrypt connection password enabled.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "glue", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:glue:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/glue/glue_data_catalogs_connection_passwords_encryption_enabled/glue_data_catalogs_connection_passwords_encryption_enabled.py b/providers/aws/services/glue/glue_data_catalogs_connection_passwords_encryption_enabled/glue_data_catalogs_connection_passwords_encryption_enabled.py index 390524eb..0a7e20d8 100644 --- a/providers/aws/services/glue/glue_data_catalogs_connection_passwords_encryption_enabled/glue_data_catalogs_connection_passwords_encryption_enabled.py +++ b/providers/aws/services/glue/glue_data_catalogs_connection_passwords_encryption_enabled/glue_data_catalogs_connection_passwords_encryption_enabled.py @@ -6,7 +6,7 @@ class glue_data_catalogs_connection_passwords_encryption_enabled(Check): def execute(self): findings = [] for encryption in glue_client.catalog_encryption_settings: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = glue_client.audited_account report.region = encryption.region report.status = "FAIL" diff --git a/providers/aws/services/glue/glue_data_catalogs_metadata_encryption_enabled/glue_data_catalogs_metadata_encryption_enabled.metadata.json b/providers/aws/services/glue/glue_data_catalogs_metadata_encryption_enabled/glue_data_catalogs_metadata_encryption_enabled.metadata.json index 8c1ed9ef..da182c42 100644 --- 
a/providers/aws/services/glue/glue_data_catalogs_metadata_encryption_enabled/glue_data_catalogs_metadata_encryption_enabled.metadata.json +++ b/providers/aws/services/glue/glue_data_catalogs_metadata_encryption_enabled/glue_data_catalogs_metadata_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "glue_data_catalogs_metadata_encryption_enabled", "CheckTitle": "Check if Glue data catalog settings have metadata encryption enabled.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "glue", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:glue:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/glue/glue_data_catalogs_metadata_encryption_enabled/glue_data_catalogs_metadata_encryption_enabled.py b/providers/aws/services/glue/glue_data_catalogs_metadata_encryption_enabled/glue_data_catalogs_metadata_encryption_enabled.py index 265a4212..78c44424 100644 --- a/providers/aws/services/glue/glue_data_catalogs_metadata_encryption_enabled/glue_data_catalogs_metadata_encryption_enabled.py +++ b/providers/aws/services/glue/glue_data_catalogs_metadata_encryption_enabled/glue_data_catalogs_metadata_encryption_enabled.py @@ -6,7 +6,7 @@ class glue_data_catalogs_metadata_encryption_enabled(Check): def execute(self): findings = [] for encryption in glue_client.catalog_encryption_settings: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = glue_client.audited_account report.region = encryption.region report.status = "FAIL" diff --git a/providers/aws/services/glue/glue_database_connections_ssl_enabled/glue_database_connections_ssl_enabled.metadata.json b/providers/aws/services/glue/glue_database_connections_ssl_enabled/glue_database_connections_ssl_enabled.metadata.json index 2ad4a881..82885310 100644 --- a/providers/aws/services/glue/glue_database_connections_ssl_enabled/glue_database_connections_ssl_enabled.metadata.json +++ b/providers/aws/services/glue/glue_database_connections_ssl_enabled/glue_database_connections_ssl_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "glue_database_connections_ssl_enabled", "CheckTitle": "Check if Glue database connection has SSL connection enabled.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "glue", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:glue:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/glue/glue_database_connections_ssl_enabled/glue_database_connections_ssl_enabled.py b/providers/aws/services/glue/glue_database_connections_ssl_enabled/glue_database_connections_ssl_enabled.py index 2aa8bf33..ad98fce4 100644 --- a/providers/aws/services/glue/glue_database_connections_ssl_enabled/glue_database_connections_ssl_enabled.py +++ 
b/providers/aws/services/glue/glue_database_connections_ssl_enabled/glue_database_connections_ssl_enabled.py @@ -6,7 +6,7 @@ class glue_database_connections_ssl_enabled(Check): def execute(self): findings = [] for conn in glue_client.connections: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = conn.name report.region = conn.region report.status = "FAIL" diff --git a/providers/aws/services/glue/glue_development_endpoints_cloudwatch_logs_encryption_enabled/glue_development_endpoints_cloudwatch_logs_encryption_enabled.metadata.json b/providers/aws/services/glue/glue_development_endpoints_cloudwatch_logs_encryption_enabled/glue_development_endpoints_cloudwatch_logs_encryption_enabled.metadata.json index 7b268cbe..4f4d9920 100644 --- a/providers/aws/services/glue/glue_development_endpoints_cloudwatch_logs_encryption_enabled/glue_development_endpoints_cloudwatch_logs_encryption_enabled.metadata.json +++ b/providers/aws/services/glue/glue_development_endpoints_cloudwatch_logs_encryption_enabled/glue_development_endpoints_cloudwatch_logs_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "glue_development_endpoints_cloudwatch_logs_encryption_enabled", "CheckTitle": "Check if Glue development endpoints have CloudWatch logs encryption enabled.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "glue", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:glue:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/glue/glue_development_endpoints_cloudwatch_logs_encryption_enabled/glue_development_endpoints_cloudwatch_logs_encryption_enabled.py b/providers/aws/services/glue/glue_development_endpoints_cloudwatch_logs_encryption_enabled/glue_development_endpoints_cloudwatch_logs_encryption_enabled.py index cc1b3810..9b8fa09d 100644 --- a/providers/aws/services/glue/glue_development_endpoints_cloudwatch_logs_encryption_enabled/glue_development_endpoints_cloudwatch_logs_encryption_enabled.py +++ b/providers/aws/services/glue/glue_development_endpoints_cloudwatch_logs_encryption_enabled/glue_development_endpoints_cloudwatch_logs_encryption_enabled.py @@ -7,7 +7,7 @@ class glue_development_endpoints_cloudwatch_logs_encryption_enabled(Check): findings = [] for endpoint in glue_client.dev_endpoints: no_sec_configs = True - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = endpoint.name report.region = endpoint.region for sec_config in glue_client.security_configs: diff --git a/providers/aws/services/glue/glue_development_endpoints_job_bookmark_encryption_enabled/glue_development_endpoints_job_bookmark_encryption_enabled.metadata.json b/providers/aws/services/glue/glue_development_endpoints_job_bookmark_encryption_enabled/glue_development_endpoints_job_bookmark_encryption_enabled.metadata.json index fefc0da4..5c7a8797 100644 --- a/providers/aws/services/glue/glue_development_endpoints_job_bookmark_encryption_enabled/glue_development_endpoints_job_bookmark_encryption_enabled.metadata.json +++ 
b/providers/aws/services/glue/glue_development_endpoints_job_bookmark_encryption_enabled/glue_development_endpoints_job_bookmark_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "glue_development_endpoints_job_bookmark_encryption_enabled", "CheckTitle": "Check if Glue development endpoints have Job bookmark encryption enabled.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "glue", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:glue:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/glue/glue_development_endpoints_job_bookmark_encryption_enabled/glue_development_endpoints_job_bookmark_encryption_enabled.py b/providers/aws/services/glue/glue_development_endpoints_job_bookmark_encryption_enabled/glue_development_endpoints_job_bookmark_encryption_enabled.py index a33cbe5b..eb7cc997 100644 --- a/providers/aws/services/glue/glue_development_endpoints_job_bookmark_encryption_enabled/glue_development_endpoints_job_bookmark_encryption_enabled.py +++ b/providers/aws/services/glue/glue_development_endpoints_job_bookmark_encryption_enabled/glue_development_endpoints_job_bookmark_encryption_enabled.py @@ -7,7 +7,7 @@ class glue_development_endpoints_job_bookmark_encryption_enabled(Check): findings = [] for endpoint in glue_client.dev_endpoints: no_sec_configs = True - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = endpoint.name report.region = endpoint.region for sec_config in glue_client.security_configs: diff --git a/providers/aws/services/glue/glue_development_endpoints_s3_encryption_enabled/glue_development_endpoints_s3_encryption_enabled.metadata.json b/providers/aws/services/glue/glue_development_endpoints_s3_encryption_enabled/glue_development_endpoints_s3_encryption_enabled.metadata.json index 346f3511..9a19cf08 100644 --- a/providers/aws/services/glue/glue_development_endpoints_s3_encryption_enabled/glue_development_endpoints_s3_encryption_enabled.metadata.json +++ b/providers/aws/services/glue/glue_development_endpoints_s3_encryption_enabled/glue_development_endpoints_s3_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "glue_development_endpoints_s3_encryption_enabled", "CheckTitle": "Check if Glue development endpoints have S3 encryption enabled.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "glue", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:glue:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/glue/glue_development_endpoints_s3_encryption_enabled/glue_development_endpoints_s3_encryption_enabled.py b/providers/aws/services/glue/glue_development_endpoints_s3_encryption_enabled/glue_development_endpoints_s3_encryption_enabled.py index 6b6b9abb..06464695 100644 --- 
a/providers/aws/services/glue/glue_development_endpoints_s3_encryption_enabled/glue_development_endpoints_s3_encryption_enabled.py +++ b/providers/aws/services/glue/glue_development_endpoints_s3_encryption_enabled/glue_development_endpoints_s3_encryption_enabled.py @@ -7,7 +7,7 @@ class glue_development_endpoints_s3_encryption_enabled(Check): findings = [] for endpoint in glue_client.dev_endpoints: no_sec_configs = True - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = endpoint.name report.region = endpoint.region for sec_config in glue_client.security_configs: diff --git a/providers/aws/services/glue/glue_etl_jobs_amazon_s3_encryption_enabled/glue_etl_jobs_amazon_s3_encryption_enabled.metadata.json b/providers/aws/services/glue/glue_etl_jobs_amazon_s3_encryption_enabled/glue_etl_jobs_amazon_s3_encryption_enabled.metadata.json index e40a54b0..7bbe1203 100644 --- a/providers/aws/services/glue/glue_etl_jobs_amazon_s3_encryption_enabled/glue_etl_jobs_amazon_s3_encryption_enabled.metadata.json +++ b/providers/aws/services/glue/glue_etl_jobs_amazon_s3_encryption_enabled/glue_etl_jobs_amazon_s3_encryption_enabled.metadata.json @@ -1,8 +1,10 @@ { "Provider": "aws", - "CheckID": "glue_development_endpoints_s3_encryption_enabled", + "CheckID": "glue_etl_jobs_amazon_s3_encryption_enabled", "CheckTitle": "Check if Glue ETL Jobs have S3 encryption enabled.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "glue", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:glue:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/glue/glue_etl_jobs_amazon_s3_encryption_enabled/glue_etl_jobs_amazon_s3_encryption_enabled.py b/providers/aws/services/glue/glue_etl_jobs_amazon_s3_encryption_enabled/glue_etl_jobs_amazon_s3_encryption_enabled.py index f5b34385..002d9bda 100644 --- a/providers/aws/services/glue/glue_etl_jobs_amazon_s3_encryption_enabled/glue_etl_jobs_amazon_s3_encryption_enabled.py +++ b/providers/aws/services/glue/glue_etl_jobs_amazon_s3_encryption_enabled/glue_etl_jobs_amazon_s3_encryption_enabled.py @@ -7,7 +7,7 @@ class glue_etl_jobs_amazon_s3_encryption_enabled(Check): findings = [] for job in glue_client.jobs: no_sec_configs = True - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = job.name report.region = job.region for sec_config in glue_client.security_configs: diff --git a/providers/aws/services/glue/glue_etl_jobs_cloudwatch_logs_encryption_enabled/glue_etl_jobs_cloudwatch_logs_encryption_enabled.metadata.json b/providers/aws/services/glue/glue_etl_jobs_cloudwatch_logs_encryption_enabled/glue_etl_jobs_cloudwatch_logs_encryption_enabled.metadata.json index 77616477..dcd960d6 100644 --- a/providers/aws/services/glue/glue_etl_jobs_cloudwatch_logs_encryption_enabled/glue_etl_jobs_cloudwatch_logs_encryption_enabled.metadata.json +++ b/providers/aws/services/glue/glue_etl_jobs_cloudwatch_logs_encryption_enabled/glue_etl_jobs_cloudwatch_logs_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "glue_etl_jobs_cloudwatch_logs_encryption_enabled", "CheckTitle": "Check if Glue ETL Jobs have CloudWatch Logs 
encryption enabled.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "glue", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:glue:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/glue/glue_etl_jobs_cloudwatch_logs_encryption_enabled/glue_etl_jobs_cloudwatch_logs_encryption_enabled.py b/providers/aws/services/glue/glue_etl_jobs_cloudwatch_logs_encryption_enabled/glue_etl_jobs_cloudwatch_logs_encryption_enabled.py index 7da40aa8..d1271073 100644 --- a/providers/aws/services/glue/glue_etl_jobs_cloudwatch_logs_encryption_enabled/glue_etl_jobs_cloudwatch_logs_encryption_enabled.py +++ b/providers/aws/services/glue/glue_etl_jobs_cloudwatch_logs_encryption_enabled/glue_etl_jobs_cloudwatch_logs_encryption_enabled.py @@ -7,7 +7,7 @@ class glue_etl_jobs_cloudwatch_logs_encryption_enabled(Check): findings = [] for job in glue_client.jobs: no_sec_configs = True - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = job.name report.region = job.region for sec_config in glue_client.security_configs: diff --git a/providers/aws/services/glue/glue_etl_jobs_job_bookmark_encryption_enabled/glue_etl_jobs_job_bookmark_encryption_enabled.metadata.json b/providers/aws/services/glue/glue_etl_jobs_job_bookmark_encryption_enabled/glue_etl_jobs_job_bookmark_encryption_enabled.metadata.json index 8497daa9..30de3bf6 100644 --- a/providers/aws/services/glue/glue_etl_jobs_job_bookmark_encryption_enabled/glue_etl_jobs_job_bookmark_encryption_enabled.metadata.json +++ b/providers/aws/services/glue/glue_etl_jobs_job_bookmark_encryption_enabled/glue_etl_jobs_job_bookmark_encryption_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "glue_etl_jobs_job_bookmark_encryption_enabled", "CheckTitle": "Check if Glue ETL Jobs have Job bookmark encryption enabled.", - "CheckType": ["Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks/Industry and Regulatory Standards/CIS AWS Foundations Benchmark" + ], "ServiceName": "glue", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:glue:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Logging and Monitoring", - "Compliance": [] + "Notes": "Logging and Monitoring" } diff --git a/providers/aws/services/glue/glue_etl_jobs_job_bookmark_encryption_enabled/glue_etl_jobs_job_bookmark_encryption_enabled.py b/providers/aws/services/glue/glue_etl_jobs_job_bookmark_encryption_enabled/glue_etl_jobs_job_bookmark_encryption_enabled.py index 65d61f62..c1d6ef88 100644 --- a/providers/aws/services/glue/glue_etl_jobs_job_bookmark_encryption_enabled/glue_etl_jobs_job_bookmark_encryption_enabled.py +++ b/providers/aws/services/glue/glue_etl_jobs_job_bookmark_encryption_enabled/glue_etl_jobs_job_bookmark_encryption_enabled.py @@ -7,7 +7,7 @@ class glue_etl_jobs_job_bookmark_encryption_enabled(Check): findings = [] for job in glue_client.jobs: no_sec_configs = True - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = job.name report.region 
= job.region for sec_config in glue_client.security_configs: diff --git a/providers/aws/services/guardduty/guardduty_is_enabled/guardduty_is_enabled.metadata.json b/providers/aws/services/guardduty/guardduty_is_enabled/guardduty_is_enabled.metadata.json index b2bbaa19..4aa262be 100644 --- a/providers/aws/services/guardduty/guardduty_is_enabled/guardduty_is_enabled.metadata.json +++ b/providers/aws/services/guardduty/guardduty_is_enabled/guardduty_is_enabled.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "guardduty_is_enabled", - "CheckTitle": "Check if GuardDuty is enabled", - "CheckType": [], - "ServiceName": "guardduty", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id", - "Severity": "high", - "ResourceType": "AwsGuardDutyDetector", - "Description": "Check if GuardDuty is enabled", - "Risk": "Amazon GuardDuty is a continuous security monitoring service that analyzes and processes several datasources.", - "RelatedUrl": "https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_settingup.html", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/GuardDuty/guardduty-enabled.html", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/GuardDuty/guardduty-enabled.html", - "Terraform": "https://docs.bridgecrew.io/docs/ensure-guardduty-is-enabled-to-specific-orgregion#fix---buildtime" - }, - "Recommendation": { - "Text": "Enable GuardDuty and analyze its findings.", - "Url": "https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_settingup.html" - } + "Provider": "aws", + "CheckID": "guardduty_is_enabled", + "CheckTitle": "Check if GuardDuty is enabled", + "CheckType": [], + "ServiceName": "guardduty", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id", + "Severity": "high", + "ResourceType": "AwsGuardDutyDetector", + "Description": "Check if GuardDuty is enabled", + "Risk": "Amazon GuardDuty is a continuous security monitoring service that analyzes and processes several datasources.", + "RelatedUrl": "https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_settingup.html", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/GuardDuty/guardduty-enabled.html", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/GuardDuty/guardduty-enabled.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-guardduty-is-enabled-to-specific-orgregion#fix---buildtime" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable GuardDuty and analyze its findings.", + "Url": "https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_settingup.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/guardduty/guardduty_is_enabled/guardduty_is_enabled.py b/providers/aws/services/guardduty/guardduty_is_enabled/guardduty_is_enabled.py index 1130094b..c6cfa3c4 100644 --- a/providers/aws/services/guardduty/guardduty_is_enabled/guardduty_is_enabled.py +++ b/providers/aws/services/guardduty/guardduty_is_enabled/guardduty_is_enabled.py @@ -6,7 +6,7 @@ class guardduty_is_enabled(Check): def execute(self): findings = [] for detector in guardduty_client.detectors: - 
report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = detector.region report.resource_id = detector.id report.resource_arn = detector.arn diff --git a/providers/aws/services/guardduty/guardduty_no_high_severity_findings/guardduty_no_high_severity_findings.metadata.json b/providers/aws/services/guardduty/guardduty_no_high_severity_findings/guardduty_no_high_severity_findings.metadata.json index 98311e7a..7fe74ec7 100644 --- a/providers/aws/services/guardduty/guardduty_no_high_severity_findings/guardduty_no_high_severity_findings.metadata.json +++ b/providers/aws/services/guardduty/guardduty_no_high_severity_findings/guardduty_no_high_severity_findings.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "guardduty_no_high_severity_findings", - "CheckTitle": "There are High severity GuardDuty findings ", - "CheckType": [], - "ServiceName": "guardduty", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id", - "Severity": "high", - "ResourceType": "AwsGuardDutyDetector", - "Description": "There are High severity GuardDuty findings ", - "Risk": "If critical findings are not addressed threats can spread in the environment.", - "RelatedUrl": "https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/GuardDuty/findings.html", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/GuardDuty/findings.html", - "Terraform": "" - }, - "Recommendation": { - "Text": "Review and remediate critical GuardDuty findings as quickly as possible.", - "Url": "https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html" - } + "Provider": "aws", + "CheckID": "guardduty_no_high_severity_findings", + "CheckTitle": "There are High severity GuardDuty findings ", + "CheckType": [], + "ServiceName": "guardduty", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id", + "Severity": "high", + "ResourceType": "AwsGuardDutyDetector", + "Description": "There are High severity GuardDuty findings ", + "Risk": "If critical findings are not addressed threats can spread in the environment.", + "RelatedUrl": "https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/GuardDuty/findings.html", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/GuardDuty/findings.html", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Review and remediate critical GuardDuty findings as quickly as possible.", + "Url": "https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/guardduty/guardduty_no_high_severity_findings/guardduty_no_high_severity_findings.py b/providers/aws/services/guardduty/guardduty_no_high_severity_findings/guardduty_no_high_severity_findings.py index 19251c16..9d321adf 100644 --- a/providers/aws/services/guardduty/guardduty_no_high_severity_findings/guardduty_no_high_severity_findings.py +++ 
b/providers/aws/services/guardduty/guardduty_no_high_severity_findings/guardduty_no_high_severity_findings.py @@ -6,7 +6,7 @@ class guardduty_no_high_severity_findings(Check): def execute(self): findings = [] for detector in guardduty_client.detectors: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = detector.region report.resource_id = detector.id report.resource_arn = detector.arn diff --git a/providers/aws/services/iam/iam_administrator_access_with_mfa/iam_administrator_access_with_mfa.metadata.json b/providers/aws/services/iam/iam_administrator_access_with_mfa/iam_administrator_access_with_mfa.metadata.json index 6ee397e0..22045355 100644 --- a/providers/aws/services/iam/iam_administrator_access_with_mfa/iam_administrator_access_with_mfa.metadata.json +++ b/providers/aws/services/iam/iam_administrator_access_with_mfa/iam_administrator_access_with_mfa.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "iam_administrator_access_with_mfa", "CheckTitle": "Ensure users of groups with AdministratorAccess policy have MFA tokens enabled", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_administrator_access_with_mfa/iam_administrator_access_with_mfa.py b/providers/aws/services/iam/iam_administrator_access_with_mfa/iam_administrator_access_with_mfa.py index 1717a39c..bd1e16e4 100644 --- a/providers/aws/services/iam/iam_administrator_access_with_mfa/iam_administrator_access_with_mfa.py +++ b/providers/aws/services/iam/iam_administrator_access_with_mfa/iam_administrator_access_with_mfa.py @@ -8,7 +8,7 @@ class iam_administrator_access_with_mfa(Check): response = iam_client.groups for group in response: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = group.name report.resource_arn = group.arn report.region = iam_client.region diff --git a/providers/aws/services/iam/iam_avoid_root_usage/iam_avoid_root_usage.metadata.json b/providers/aws/services/iam/iam_avoid_root_usage/iam_avoid_root_usage.metadata.json index 873a4f83..8c7f3702 100644 --- a/providers/aws/services/iam/iam_avoid_root_usage/iam_avoid_root_usage.metadata.json +++ b/providers/aws/services/iam/iam_avoid_root_usage/iam_avoid_root_usage.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_avoid_root_usage", "CheckTitle": "Avoid the use of the root accounts", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.1" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_avoid_root_usage/iam_avoid_root_usage.py b/providers/aws/services/iam/iam_avoid_root_usage/iam_avoid_root_usage.py index 0959836b..3a7c0721 100644 --- a/providers/aws/services/iam/iam_avoid_root_usage/iam_avoid_root_usage.py 
+++ b/providers/aws/services/iam/iam_avoid_root_usage/iam_avoid_root_usage.py @@ -13,7 +13,7 @@ class iam_avoid_root_usage(Check): for user in response: if user["user"] == "": - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = user["user"] report.resource_arn = user["arn"] diff --git a/providers/aws/services/iam/iam_check_saml_providers_sts/iam_check_saml_providers_sts.metadata.json b/providers/aws/services/iam/iam_check_saml_providers_sts/iam_check_saml_providers_sts.metadata.json index de824c89..d12ee0fe 100644 --- a/providers/aws/services/iam/iam_check_saml_providers_sts/iam_check_saml_providers_sts.metadata.json +++ b/providers/aws/services/iam/iam_check_saml_providers_sts/iam_check_saml_providers_sts.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "iam-check-saml-providers-sts", - "CheckTitle": "Check if there are SAML Providers then STS can be used", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], - "ServiceName": "iam", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "low", - "ResourceType": "Other", - "Description": "Check if there are SAML Providers then STS can be used", - "Risk": "Without SAML provider users with AWS CLI or AWS API access can use IAM static credentials. SAML helps users to assume role by default each time they authenticate.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Enable SAML provider and use temporary credentials. You can use temporary security credentials to make programmatic requests for AWS resources using the AWS CLI or AWS API (using the AWS SDKs ). The temporary credentials provide the same permissions that you have with use long-term security credentials such as IAM user credentials. In case of not having SAML provider capabilities prevent usage of long-lived credentials.", - "Url": "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithSAML.html" - } + "Provider": "aws", + "CheckID": "iam_check_saml_providers_sts", + "CheckTitle": "Check if there are SAML Providers then STS can be used", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "iam", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "low", + "ResourceType": "Other", + "Description": "Check if there are SAML Providers then STS can be used", + "Risk": "Without SAML provider users with AWS CLI or AWS API access can use IAM static credentials. SAML helps users to assume role by default each time they authenticate.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable SAML provider and use temporary credentials. You can use temporary security credentials to make programmatic requests for AWS resources using the AWS CLI or AWS API (using the AWS SDKs ). The temporary credentials provide the same permissions that you have with use long-term security credentials such as IAM user credentials. 
In case of not having SAML provider capabilities prevent usage of long-lived credentials.", + "Url": "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithSAML.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/iam/iam_check_saml_providers_sts/iam_check_saml_providers_sts.py b/providers/aws/services/iam/iam_check_saml_providers_sts/iam_check_saml_providers_sts.py index b8603938..5872273a 100644 --- a/providers/aws/services/iam/iam_check_saml_providers_sts/iam_check_saml_providers_sts.py +++ b/providers/aws/services/iam/iam_check_saml_providers_sts/iam_check_saml_providers_sts.py @@ -6,7 +6,7 @@ class iam_check_saml_providers_sts(Check): def execute(self) -> Check_Report: findings = [] for provider in iam_client.saml_providers: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) provider_name = provider["Arn"].split("/")[1] report.resource_id = provider_name report.resource_arn = provider["Arn"] diff --git a/providers/aws/services/iam/iam_disable_30_days_credentials/iam_disable_30_days_credentials.metadata.json b/providers/aws/services/iam/iam_disable_30_days_credentials/iam_disable_30_days_credentials.metadata.json index 19dbbe86..5d0619da 100644 --- a/providers/aws/services/iam/iam_disable_30_days_credentials/iam_disable_30_days_credentials.metadata.json +++ b/providers/aws/services/iam/iam_disable_30_days_credentials/iam_disable_30_days_credentials.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "iam_disable_30_days_credentials", "CheckTitle": "Ensure credentials unused for 30 days or greater are disabled", - "CheckType": ["Software and Configuration Checks"], + "CheckType": [ + "Software and Configuration Checks" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_disable_30_days_credentials/iam_disable_30_days_credentials.py b/providers/aws/services/iam/iam_disable_30_days_credentials/iam_disable_30_days_credentials.py index 69994752..3d73dfba 100644 --- a/providers/aws/services/iam/iam_disable_30_days_credentials/iam_disable_30_days_credentials.py +++ b/providers/aws/services/iam/iam_disable_30_days_credentials/iam_disable_30_days_credentials.py @@ -12,7 +12,7 @@ class iam_disable_30_days_credentials(Check): response = iam_client.users for user in response: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = user.name report.resource_arn = user.arn report.region = iam_client.region diff --git a/providers/aws/services/iam/iam_disable_45_days_credentials/iam_disable_45_days_credentials.metadata.json b/providers/aws/services/iam/iam_disable_45_days_credentials/iam_disable_45_days_credentials.metadata.json index ce2297b5..8b771875 100644 --- a/providers/aws/services/iam/iam_disable_45_days_credentials/iam_disable_45_days_credentials.metadata.json +++ b/providers/aws/services/iam/iam_disable_45_days_credentials/iam_disable_45_days_credentials.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "iam_disable_45_days_credentials", "CheckTitle": "Ensure credentials unused for 45 days or greater are disabled", - "CheckType": ["Software and Configuration Checks"], + "CheckType": [ + "Software and Configuration Checks" + ], 
"ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_disable_45_days_credentials/iam_disable_45_days_credentials.py b/providers/aws/services/iam/iam_disable_45_days_credentials/iam_disable_45_days_credentials.py index 75d0a2da..43944ce8 100644 --- a/providers/aws/services/iam/iam_disable_45_days_credentials/iam_disable_45_days_credentials.py +++ b/providers/aws/services/iam/iam_disable_45_days_credentials/iam_disable_45_days_credentials.py @@ -12,7 +12,7 @@ class iam_disable_45_days_credentials(Check): response = iam_client.users for user in response: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = user.name report.resource_arn = user.arn report.region = iam_client.region diff --git a/providers/aws/services/iam/iam_disable_90_days_credentials/iam_disable_90_days_credentials.metadata.json b/providers/aws/services/iam/iam_disable_90_days_credentials/iam_disable_90_days_credentials.metadata.json index f8913663..b3ffa46e 100644 --- a/providers/aws/services/iam/iam_disable_90_days_credentials/iam_disable_90_days_credentials.metadata.json +++ b/providers/aws/services/iam/iam_disable_90_days_credentials/iam_disable_90_days_credentials.metadata.json @@ -2,18 +2,8 @@ "Categories": [], "CheckID": "iam_disable_90_days_credentials", "CheckTitle": "Ensure credentials unused for 90 days or greater are disabled", - "CheckType": ["Software and Configuration Checks"], - "Compliance": [ - { - "Control": [ - "1.3" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } + "CheckType": [ + "Software and Configuration Checks" ], "DependsOn": [], "Description": "Ensure credentials unused for 90 days or greater are disabled", diff --git a/providers/aws/services/iam/iam_disable_90_days_credentials/iam_disable_90_days_credentials.py b/providers/aws/services/iam/iam_disable_90_days_credentials/iam_disable_90_days_credentials.py index 614d1896..85512650 100644 --- a/providers/aws/services/iam/iam_disable_90_days_credentials/iam_disable_90_days_credentials.py +++ b/providers/aws/services/iam/iam_disable_90_days_credentials/iam_disable_90_days_credentials.py @@ -12,7 +12,7 @@ class iam_disable_90_days_credentials(Check): response = iam_client.users for user in response: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = user.name report.resource_arn = user.arn diff --git a/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.metadata.json b/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.metadata.json index 65373c77..d5a37dcf 100644 --- a/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.metadata.json +++ b/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.metadata.json @@ -1,35 +1,36 @@ { - "Provider": "aws", - "CheckID": "iam_no_custom_policy_permissive_role_assumption", - "CheckTitle": "Ensure that no custom IAM policies exist which allow permissive role assumption (e.g. 
sts:AssumeRole on *)", - "CheckType": ["Software and Configuration Checks"], - "ServiceName": "iam", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "critical", - "ResourceType": "AwsIamPolicy", - "Description": "Ensure that no custom IAM policies exist which allow permissive role assumption (e.g. sts:AssumeRole on *)", - "Risk": "If not restricted unintended access could happen.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Use the least privilege principle when granting permissions.", - "Url": "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html" - } + "Provider": "aws", + "CheckID": "iam_no_custom_policy_permissive_role_assumption", + "CheckTitle": "Ensure that no custom IAM policies exist which allow permissive role assumption (e.g. sts:AssumeRole on *)", + "CheckType": [ + "Software and Configuration Checks" + ], + "ServiceName": "iam", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "critical", + "ResourceType": "AwsIamPolicy", + "Description": "Ensure that no custom IAM policies exist which allow permissive role assumption (e.g. sts:AssumeRole on *)", + "Risk": "If not restricted unintended access could happen.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "CAF Security Epic: IAM", - "Compliance": [] - } + "Recommendation": { + "Text": "Use the least privilege principle when granting permissions.", + "Url": "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "CAF Security Epic: IAM" +} diff --git a/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.py b/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.py index 80a77056..c15e3f2f 100644 --- a/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.py +++ b/providers/aws/services/iam/iam_no_custom_policy_permissive_role_assumption/iam_no_custom_policy_permissive_role_assumption.py @@ -6,7 +6,7 @@ class iam_no_custom_policy_permissive_role_assumption(Check): def execute(self) -> Check_Report: findings = [] for index, policy_document in enumerate(iam_client.list_policies_version): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_arn = iam_client.policies[index]["Arn"] report.resource_id = iam_client.policies[index]["PolicyName"] diff --git a/providers/aws/services/iam/iam_no_expired_server_certificates_stored/iam_no_expired_server_certificates_stored.metadata.json b/providers/aws/services/iam/iam_no_expired_server_certificates_stored/iam_no_expired_server_certificates_stored.metadata.json index 8808458f..f1d68ef0 100644 --- a/providers/aws/services/iam/iam_no_expired_server_certificates_stored/iam_no_expired_server_certificates_stored.metadata.json +++ 
b/providers/aws/services/iam/iam_no_expired_server_certificates_stored/iam_no_expired_server_certificates_stored.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_no_expired_server_certificates_stored", "CheckTitle": "Ensure that all the expired SSL/TLS certificates stored in AWS IAM are removed.", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Data Protection", - "Compliance": [] + "Notes": "Data Protection" } diff --git a/providers/aws/services/iam/iam_no_expired_server_certificates_stored/iam_no_expired_server_certificates_stored.py b/providers/aws/services/iam/iam_no_expired_server_certificates_stored/iam_no_expired_server_certificates_stored.py index edd87024..71e1fdff 100644 --- a/providers/aws/services/iam/iam_no_expired_server_certificates_stored/iam_no_expired_server_certificates_stored.py +++ b/providers/aws/services/iam/iam_no_expired_server_certificates_stored/iam_no_expired_server_certificates_stored.py @@ -9,7 +9,7 @@ class iam_no_expired_server_certificates_stored(Check): findings = [] for certificate in iam_client.server_certificates: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = certificate.id report.resource_arn = certificate.arn diff --git a/providers/aws/services/iam/iam_no_root_access_key/iam_no_root_access_key.metadata.json b/providers/aws/services/iam/iam_no_root_access_key/iam_no_root_access_key.metadata.json index b609db07..65bb0a60 100644 --- a/providers/aws/services/iam/iam_no_root_access_key/iam_no_root_access_key.metadata.json +++ b/providers/aws/services/iam/iam_no_root_access_key/iam_no_root_access_key.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_no_root_access_key", "CheckTitle": "Ensure no root account access key exists", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.12" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_no_root_access_key/iam_no_root_access_key.py b/providers/aws/services/iam/iam_no_root_access_key/iam_no_root_access_key.py index 95626c1a..8184d1d3 100644 --- a/providers/aws/services/iam/iam_no_root_access_key/iam_no_root_access_key.py +++ b/providers/aws/services/iam/iam_no_root_access_key/iam_no_root_access_key.py @@ -9,7 +9,7 @@ class iam_no_root_access_key(Check): for user in response: if user["user"] == "": - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = user["user"] report.resource_arn = user["arn"] diff --git 
a/providers/aws/services/iam/iam_password_policy_expires_passwords_within_90_days_or_less/iam_password_policy_expires_passwords_within_90_days_or_less.metadata.json b/providers/aws/services/iam/iam_password_policy_expires_passwords_within_90_days_or_less/iam_password_policy_expires_passwords_within_90_days_or_less.metadata.json index f2863dbf..fe70fb42 100644 --- a/providers/aws/services/iam/iam_password_policy_expires_passwords_within_90_days_or_less/iam_password_policy_expires_passwords_within_90_days_or_less.metadata.json +++ b/providers/aws/services/iam/iam_password_policy_expires_passwords_within_90_days_or_less/iam_password_policy_expires_passwords_within_90_days_or_less.metadata.json @@ -1,8 +1,12 @@ { "Provider": "aws", - "CheckID": "iam-password-policy-expires-passwords-within-90-days-or-less", + "CheckID": "iam_password_policy_expires_passwords_within_90_days_or_less", "CheckTitle": "Ensure IAM password policy expires passwords within 90 days or less", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.11" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_password_policy_expires_passwords_within_90_days_or_less/iam_password_policy_expires_passwords_within_90_days_or_less.py b/providers/aws/services/iam/iam_password_policy_expires_passwords_within_90_days_or_less/iam_password_policy_expires_passwords_within_90_days_or_less.py index bf765fd2..51710d0d 100644 --- a/providers/aws/services/iam/iam_password_policy_expires_passwords_within_90_days_or_less/iam_password_policy_expires_passwords_within_90_days_or_less.py +++ b/providers/aws/services/iam/iam_password_policy_expires_passwords_within_90_days_or_less/iam_password_policy_expires_passwords_within_90_days_or_less.py @@ -5,7 +5,7 @@ from providers.aws.services.iam.iam_client import iam_client class iam_password_policy_expires_passwords_within_90_days_or_less(Check): def execute(self) -> Check_Report: findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = "password_policy" # Check if password policy exists diff --git a/providers/aws/services/iam/iam_password_policy_lowercase/iam_password_policy_lowercase.metadata.json b/providers/aws/services/iam/iam_password_policy_lowercase/iam_password_policy_lowercase.metadata.json index bbb68c03..a9736da8 100644 --- a/providers/aws/services/iam/iam_password_policy_lowercase/iam_password_policy_lowercase.metadata.json +++ b/providers/aws/services/iam/iam_password_policy_lowercase/iam_password_policy_lowercase.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_password_policy_lowercase", "CheckTitle": "Ensure IAM password policy require at least one lowercase letter", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": 
"", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.6" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_password_policy_lowercase/iam_password_policy_lowercase.py b/providers/aws/services/iam/iam_password_policy_lowercase/iam_password_policy_lowercase.py index c5dc5f99..9ffa8001 100644 --- a/providers/aws/services/iam/iam_password_policy_lowercase/iam_password_policy_lowercase.py +++ b/providers/aws/services/iam/iam_password_policy_lowercase/iam_password_policy_lowercase.py @@ -5,7 +5,7 @@ from providers.aws.services.iam.iam_client import iam_client class iam_password_policy_lowercase(Check): def execute(self) -> Check_Report: findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = "password_policy" # Check if password policy exists diff --git a/providers/aws/services/iam/iam_password_policy_minimum_length_14/iam_password_policy_minimum_length_14.metadata.json b/providers/aws/services/iam/iam_password_policy_minimum_length_14/iam_password_policy_minimum_length_14.metadata.json index 256679b8..b4efad67 100644 --- a/providers/aws/services/iam/iam_password_policy_minimum_length_14/iam_password_policy_minimum_length_14.metadata.json +++ b/providers/aws/services/iam/iam_password_policy_minimum_length_14/iam_password_policy_minimum_length_14.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_password_policy_minimum_length_14", "CheckTitle": "Ensure IAM password policy requires minimum length of 14 or greater", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.9" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_password_policy_minimum_length_14/iam_password_policy_minimum_length_14.py b/providers/aws/services/iam/iam_password_policy_minimum_length_14/iam_password_policy_minimum_length_14.py index a3351cb2..11628b5e 100644 --- a/providers/aws/services/iam/iam_password_policy_minimum_length_14/iam_password_policy_minimum_length_14.py +++ b/providers/aws/services/iam/iam_password_policy_minimum_length_14/iam_password_policy_minimum_length_14.py @@ -5,7 +5,7 @@ from providers.aws.services.iam.iam_client import iam_client class iam_password_policy_minimum_length_14(Check): def execute(self) -> Check_Report: findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = "password_policy" # Check if password policy exists diff --git a/providers/aws/services/iam/iam_password_policy_number/iam_password_policy_number.metadata.json b/providers/aws/services/iam/iam_password_policy_number/iam_password_policy_number.metadata.json index 57269fcf..926ba41d 100644 --- 
a/providers/aws/services/iam/iam_password_policy_number/iam_password_policy_number.metadata.json +++ b/providers/aws/services/iam/iam_password_policy_number/iam_password_policy_number.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_password_policy_number", "CheckTitle": "Ensure IAM password policy require at least one number", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.8" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_password_policy_number/iam_password_policy_number.py b/providers/aws/services/iam/iam_password_policy_number/iam_password_policy_number.py index ec8d2d9b..758a17a3 100644 --- a/providers/aws/services/iam/iam_password_policy_number/iam_password_policy_number.py +++ b/providers/aws/services/iam/iam_password_policy_number/iam_password_policy_number.py @@ -5,7 +5,7 @@ from providers.aws.services.iam.iam_client import iam_client class iam_password_policy_number(Check): def execute(self) -> Check_Report: findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = "password_policy" # Check if password policy exists diff --git a/providers/aws/services/iam/iam_password_policy_reuse_24/iam_password_policy_reuse_24.metadata.json b/providers/aws/services/iam/iam_password_policy_reuse_24/iam_password_policy_reuse_24.metadata.json index 7f0e22fe..0f8e7969 100644 --- a/providers/aws/services/iam/iam_password_policy_reuse_24/iam_password_policy_reuse_24.metadata.json +++ b/providers/aws/services/iam/iam_password_policy_reuse_24/iam_password_policy_reuse_24.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_password_policy_reuse_24", "CheckTitle": "Ensure IAM password policy prevents password reuse: 24 or greater", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.10" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_password_policy_reuse_24/iam_password_policy_reuse_24.py b/providers/aws/services/iam/iam_password_policy_reuse_24/iam_password_policy_reuse_24.py index cc63d28d..a3c535e5 100644 --- a/providers/aws/services/iam/iam_password_policy_reuse_24/iam_password_policy_reuse_24.py +++ b/providers/aws/services/iam/iam_password_policy_reuse_24/iam_password_policy_reuse_24.py @@ -5,7 +5,7 @@ from providers.aws.services.iam.iam_client import iam_client class iam_password_policy_reuse_24(Check): def execute(self) -> Check_Report: findings = [] - report = Check_Report(self.metadata) + report = 
Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = "password_policy" # Check if password policy exists diff --git a/providers/aws/services/iam/iam_password_policy_symbol/iam_password_policy_symbol.metadata.json b/providers/aws/services/iam/iam_password_policy_symbol/iam_password_policy_symbol.metadata.json index cc499207..c49c3740 100644 --- a/providers/aws/services/iam/iam_password_policy_symbol/iam_password_policy_symbol.metadata.json +++ b/providers/aws/services/iam/iam_password_policy_symbol/iam_password_policy_symbol.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_password_policy_symbol", "CheckTitle": "Ensure IAM password policy require at least one symbol", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.7" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_password_policy_symbol/iam_password_policy_symbol.py b/providers/aws/services/iam/iam_password_policy_symbol/iam_password_policy_symbol.py index 864b8cee..5b15b6ea 100644 --- a/providers/aws/services/iam/iam_password_policy_symbol/iam_password_policy_symbol.py +++ b/providers/aws/services/iam/iam_password_policy_symbol/iam_password_policy_symbol.py @@ -5,7 +5,7 @@ from providers.aws.services.iam.iam_client import iam_client class iam_password_policy_symbol(Check): def execute(self) -> Check_Report: findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = "password_policy" # Check if password policy exists diff --git a/providers/aws/services/iam/iam_password_policy_uppercase/iam_password_policy_uppercase.metadata.json b/providers/aws/services/iam/iam_password_policy_uppercase/iam_password_policy_uppercase.metadata.json index 17e73d32..6d4017cc 100644 --- a/providers/aws/services/iam/iam_password_policy_uppercase/iam_password_policy_uppercase.metadata.json +++ b/providers/aws/services/iam/iam_password_policy_uppercase/iam_password_policy_uppercase.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_password_policy_uppercase", "CheckTitle": "Ensure IAM password policy requires at least one uppercase letter", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.5" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_password_policy_uppercase/iam_password_policy_uppercase.py b/providers/aws/services/iam/iam_password_policy_uppercase/iam_password_policy_uppercase.py index 55aad3d3..08903c69 100644 --- 
a/providers/aws/services/iam/iam_password_policy_uppercase/iam_password_policy_uppercase.py +++ b/providers/aws/services/iam/iam_password_policy_uppercase/iam_password_policy_uppercase.py @@ -5,7 +5,7 @@ from providers.aws.services.iam.iam_client import iam_client class iam_password_policy_uppercase(Check): def execute(self) -> Check_Report: findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = "password_policy" # Check if password policy exists diff --git a/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.metadata.json b/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.metadata.json index 7a06713f..61f29aaf 100644 --- a/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.metadata.json +++ b/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_policy_allows_privilege_escalation", "CheckTitle": "Ensure no Customer Managed IAM policies allow actions that may lead into Privilege Escalation", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "CAF Security Epic: IAM", - "Compliance": [] + "Notes": "CAF Security Epic: IAM" } diff --git a/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py b/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py index 789485e3..95f96e00 100644 --- a/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py +++ b/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py @@ -61,7 +61,7 @@ class iam_policy_allows_privilege_escalation(Check): } findings = [] for policy in iam_client.customer_managed_policies: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = policy["PolicyName"] report.resource_arn = policy["Arn"] report.region = iam_client.region diff --git a/providers/aws/services/iam/iam_policy_attached_only_to_group_or_roles/iam_policy_attached_only_to_group_or_roles.metadata.json b/providers/aws/services/iam/iam_policy_attached_only_to_group_or_roles/iam_policy_attached_only_to_group_or_roles.metadata.json index 9a1eac48..378952f7 100644 --- a/providers/aws/services/iam/iam_policy_attached_only_to_group_or_roles/iam_policy_attached_only_to_group_or_roles.metadata.json +++ b/providers/aws/services/iam/iam_policy_attached_only_to_group_or_roles/iam_policy_attached_only_to_group_or_roles.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_policy_attached_only_to_group_or_roles", "CheckTitle": "Ensure IAM policies are attached only to groups or roles", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + 
"Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "CAF Security Epic: IAM", - "Compliance": [ - { - "Control": [ - "1.16" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "CAF Security Epic: IAM" } diff --git a/providers/aws/services/iam/iam_policy_attached_only_to_group_or_roles/iam_policy_attached_only_to_group_or_roles.py b/providers/aws/services/iam/iam_policy_attached_only_to_group_or_roles/iam_policy_attached_only_to_group_or_roles.py index a52bd3be..8373a072 100644 --- a/providers/aws/services/iam/iam_policy_attached_only_to_group_or_roles/iam_policy_attached_only_to_group_or_roles.py +++ b/providers/aws/services/iam/iam_policy_attached_only_to_group_or_roles/iam_policy_attached_only_to_group_or_roles.py @@ -7,14 +7,14 @@ class iam_policy_attached_only_to_group_or_roles(Check): findings = [] if iam_client.users: for user in iam_client.users: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = user.name report.resource_arn = user.arn if user.attached_policies or user.inline_policies: if user.attached_policies: for policy in user.attached_policies: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.status = "FAIL" report.status_extended = f"User {user.name} has attached the following policy {policy['PolicyName']}" @@ -22,7 +22,7 @@ class iam_policy_attached_only_to_group_or_roles(Check): findings.append(report) if user.inline_policies: for policy in user.inline_policies: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.status = "FAIL" report.status_extended = f"User {user.name} has the following inline policy {policy}" diff --git a/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.metadata.json b/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.metadata.json index 886a4868..4b36475c 100644 --- a/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.metadata.json +++ b/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.metadata.json @@ -1,46 +1,38 @@ { - "Provider": "aws", - "CheckID": "iam_policy_no_administrative_privileges", - "CheckTitle": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not created", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], - "ServiceName": "iam", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsIamPolicy", - "Description": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not created", - "Risk": "IAM policies are the means by which privileges are granted to users; groups; or roles. It is recommended and considered a standard security advice to grant least privilege—that is; granting only the permissions required to perform a task. 
Determine what users need to do and then craft policies for them that let the users perform only those tasks instead of allowing full administrative privileges. Providing full administrative privileges instead of restricting to the minimum set of permissions that the user is required to do exposes the resources to potentially unwanted actions.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "https://docs.bridgecrew.io/docs/iam_47#cli-command", - "NativeIaC": "", - "Other": "https://docs.bridgecrew.io/docs/iam_47#aws-console", - "Terraform": "https://docs.bridgecrew.io/docs/iam_47#terraform" - }, - "Recommendation": { - "Text": "It is more secure to start with a minimum set of permissions and grant additional permissions as necessary; rather than starting with permissions that are too lenient and then trying to tighten them later. List policies an analyze if permissions are the least possible to conduct business activities.", - "Url": "http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html" - } + "Provider": "aws", + "CheckID": "iam_policy_no_administrative_privileges", + "CheckTitle": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not created", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "iam", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsIamPolicy", + "Description": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not created", + "Risk": "IAM policies are the means by which privileges are granted to users; groups; or roles. It is recommended and considered a standard security advice to grant least privilege—that is; granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform only those tasks instead of allowing full administrative privileges. Providing full administrative privileges instead of restricting to the minimum set of permissions that the user is required to do exposes the resources to potentially unwanted actions.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "https://docs.bridgecrew.io/docs/iam_47#cli-command", + "NativeIaC": "", + "Other": "https://docs.bridgecrew.io/docs/iam_47#aws-console", + "Terraform": "https://docs.bridgecrew.io/docs/iam_47#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "CAF Security Epic: IAM", - "Compliance": [ - { - "Control": [ - "1.22" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] - } + "Recommendation": { + "Text": "It is more secure to start with a minimum set of permissions and grant additional permissions as necessary; rather than starting with permissions that are too lenient and then trying to tighten them later. 
List policies an analyze if permissions are the least possible to conduct business activities.", + "Url": "http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "CAF Security Epic: IAM" +} diff --git a/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.py b/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.py index e30741d8..70947abf 100644 --- a/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.py +++ b/providers/aws/services/iam/iam_policy_no_administrative_privileges/iam_policy_no_administrative_privileges.py @@ -6,7 +6,7 @@ class iam_policy_no_administrative_privileges(Check): def execute(self) -> Check_Report: findings = [] for index, policy_document in enumerate(iam_client.list_policies_version): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_arn = iam_client.policies[index]["Arn"] report.resource_id = iam_client.policies[index]["PolicyName"] diff --git a/providers/aws/services/iam/iam_root_hardware_mfa_enabled/iam_root_hardware_mfa_enabled.metadata.json b/providers/aws/services/iam/iam_root_hardware_mfa_enabled/iam_root_hardware_mfa_enabled.metadata.json index 0db47d03..e55dbdba 100644 --- a/providers/aws/services/iam/iam_root_hardware_mfa_enabled/iam_root_hardware_mfa_enabled.metadata.json +++ b/providers/aws/services/iam/iam_root_hardware_mfa_enabled/iam_root_hardware_mfa_enabled.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_root_hardware_mfa_enabled", "CheckTitle": "Ensure hardware MFA is enabled for the root account", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.14" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_root_hardware_mfa_enabled/iam_root_hardware_mfa_enabled.py b/providers/aws/services/iam/iam_root_hardware_mfa_enabled/iam_root_hardware_mfa_enabled.py index 7dbb57f8..f2a941eb 100644 --- a/providers/aws/services/iam/iam_root_hardware_mfa_enabled/iam_root_hardware_mfa_enabled.py +++ b/providers/aws/services/iam/iam_root_hardware_mfa_enabled/iam_root_hardware_mfa_enabled.py @@ -6,7 +6,7 @@ class iam_root_hardware_mfa_enabled(Check): def execute(self) -> Check_Report: findings = [] virtual_mfa = False - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = "root" report.resource_arn = f"arn:aws:iam::{iam_client.account}:root" diff --git a/providers/aws/services/iam/iam_root_mfa_enabled/iam_root_mfa_enabled.metadata.json b/providers/aws/services/iam/iam_root_mfa_enabled/iam_root_mfa_enabled.metadata.json index eb1525e0..9c1d0c9d 100644 --- a/providers/aws/services/iam/iam_root_mfa_enabled/iam_root_mfa_enabled.metadata.json +++ 
b/providers/aws/services/iam/iam_root_mfa_enabled/iam_root_mfa_enabled.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_root_mfa_enabled", "CheckTitle": "Ensure MFA is enabled for the root account", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.13" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_root_mfa_enabled/iam_root_mfa_enabled.py b/providers/aws/services/iam/iam_root_mfa_enabled/iam_root_mfa_enabled.py index 16ca9fb8..c67716ca 100644 --- a/providers/aws/services/iam/iam_root_mfa_enabled/iam_root_mfa_enabled.py +++ b/providers/aws/services/iam/iam_root_mfa_enabled/iam_root_mfa_enabled.py @@ -8,7 +8,7 @@ class iam_root_mfa_enabled(Check): for user in iam_client.credential_report: if user["user"] == "": - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = user["user"] report.resource_arn = user["arn"] diff --git a/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.metadata.json b/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.metadata.json index c0e160ad..6017a8e1 100644 --- a/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.metadata.json +++ b/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_rotate_access_key_90_days", "CheckTitle": "Ensure access keys are rotated every 90 days or less", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,17 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [ - { - "Control": [ - "1.4" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.py b/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.py index cbd1803c..6fce660b 100644 --- a/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.py +++ b/providers/aws/services/iam/iam_rotate_access_key_90_days/iam_rotate_access_key_90_days.py @@ -12,7 +12,7 @@ class iam_rotate_access_key_90_days(Check): response = iam_client.credential_report for user in response: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = user["user"] report.resource_arn = user["arn"] diff --git a/providers/aws/services/iam/iam_support_role_created/iam_support_role_created.metadata.json 
b/providers/aws/services/iam/iam_support_role_created/iam_support_role_created.metadata.json index d2d940df..c7e4a15f 100644 --- a/providers/aws/services/iam/iam_support_role_created/iam_support_role_created.metadata.json +++ b/providers/aws/services/iam/iam_support_role_created/iam_support_role_created.metadata.json @@ -1,47 +1,38 @@ { - "Provider": "aws", - "CheckID": "iam_support_role_created", - "CheckTitle": "Ensure a support role has been created to manage incidents with AWS Support", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], - "ServiceName": "iam", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsIamRole", - "Description": "Ensure a support role has been created to manage incidents with AWS Support", - "Risk": "AWS provides a support center that can be used for incident notification and response; as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Create an IAM role for managing incidents with AWS.", - "Url": "https://docs.aws.amazon.com/awssupport/latest/user/using-service-linked-roles-sup.html" - } + "Provider": "aws", + "CheckID": "iam_support_role_created", + "CheckTitle": "Ensure a support role has been created to manage incidents with AWS Support", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "iam", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsIamRole", + "Description": "Ensure a support role has been created to manage incidents with AWS Support", + "Risk": "AWS provides a support center that can be used for incident notification and response; as well as technical support and customer services. 
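
Every *.metadata.json hunk in this patch makes the same structural change: the per-check "Compliance" array is removed entirely, leaving "Notes" as the final key, and arrays such as "CheckType" are expanded to one element per line (several files are re-indented wholesale). A minimal sketch of that invariant, using a hypothetical helper name and assuming nothing beyond stdlib JSON handling:

    import json

    def metadata_drops_inline_compliance(path: str) -> bool:
        # Hypothetical helper, not part of this patch: after the change, a check's
        # metadata file should still parse cleanly and no longer embed a "Compliance" key.
        with open(path) as f:
            metadata = json.load(f)
        return "Compliance" not in metadata
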
Create an IAM Role to allow authorized users to manage incidents with AWS Support.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "CAF Security Epic: IAM", - "Compliance": [ - { - "Control": [ - "1.20" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] - } - + "Recommendation": { + "Text": "Create an IAM role for managing incidents with AWS.", + "Url": "https://docs.aws.amazon.com/awssupport/latest/user/using-service-linked-roles-sup.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "CAF Security Epic: IAM" +} diff --git a/providers/aws/services/iam/iam_support_role_created/iam_support_role_created.py b/providers/aws/services/iam/iam_support_role_created/iam_support_role_created.py index 7363bde7..cea98f28 100644 --- a/providers/aws/services/iam/iam_support_role_created/iam_support_role_created.py +++ b/providers/aws/services/iam/iam_support_role_created/iam_support_role_created.py @@ -5,7 +5,7 @@ from providers.aws.services.iam.iam_client import iam_client class iam_support_role_created(Check): def execute(self) -> Check_Report: findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = "AWSSupportServiceRolePolicy" report.resource_arn = ( diff --git a/providers/aws/services/iam/iam_user_hardware_mfa_enabled/iam_user_hardware_mfa_enabled.metadata.json b/providers/aws/services/iam/iam_user_hardware_mfa_enabled/iam_user_hardware_mfa_enabled.metadata.json index ab9e995d..1d049606 100644 --- a/providers/aws/services/iam/iam_user_hardware_mfa_enabled/iam_user_hardware_mfa_enabled.metadata.json +++ b/providers/aws/services/iam/iam_user_hardware_mfa_enabled/iam_user_hardware_mfa_enabled.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_user_hardware_mfa_enabled", "CheckTitle": "Check if IAM users have Hardware MFA enabled.", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_user_hardware_mfa_enabled/iam_user_hardware_mfa_enabled.py b/providers/aws/services/iam/iam_user_hardware_mfa_enabled/iam_user_hardware_mfa_enabled.py index 8fe5e27f..a4c23b14 100644 --- a/providers/aws/services/iam/iam_user_hardware_mfa_enabled/iam_user_hardware_mfa_enabled.py +++ b/providers/aws/services/iam/iam_user_hardware_mfa_enabled/iam_user_hardware_mfa_enabled.py @@ -8,7 +8,7 @@ class iam_user_hardware_mfa_enabled(Check): response = iam_client.users for user in response: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = user.name report.resource_arn = user.arn report.region = iam_client.region diff --git a/providers/aws/services/iam/iam_user_mfa_enabled_console_access/iam_user_mfa_enabled_console_access.metadata.json 
b/providers/aws/services/iam/iam_user_mfa_enabled_console_access/iam_user_mfa_enabled_console_access.metadata.json index 243ad2db..c017ca65 100644 --- a/providers/aws/services/iam/iam_user_mfa_enabled_console_access/iam_user_mfa_enabled_console_access.metadata.json +++ b/providers/aws/services/iam/iam_user_mfa_enabled_console_access/iam_user_mfa_enabled_console_access.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_user_mfa_enabled_console_access", "CheckTitle": "Ensure multi-factor authentication (MFA) is enabled for all IAM users that have a console password.", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_user_mfa_enabled_console_access/iam_user_mfa_enabled_console_access.py b/providers/aws/services/iam/iam_user_mfa_enabled_console_access/iam_user_mfa_enabled_console_access.py index 28883a3e..ef4f15ad 100644 --- a/providers/aws/services/iam/iam_user_mfa_enabled_console_access/iam_user_mfa_enabled_console_access.py +++ b/providers/aws/services/iam/iam_user_mfa_enabled_console_access/iam_user_mfa_enabled_console_access.py @@ -7,7 +7,7 @@ class iam_user_mfa_enabled_console_access(Check): findings = [] response = iam_client.credential_report for user in response: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = user["user"] report.resource_arn = user["arn"] report.region = iam_client.region diff --git a/providers/aws/services/iam/iam_user_no_setup_initial_access_key/iam_user_no_setup_initial_access_key.metadata.json b/providers/aws/services/iam/iam_user_no_setup_initial_access_key/iam_user_no_setup_initial_access_key.metadata.json index 41808c4e..052094cb 100644 --- a/providers/aws/services/iam/iam_user_no_setup_initial_access_key/iam_user_no_setup_initial_access_key.metadata.json +++ b/providers/aws/services/iam/iam_user_no_setup_initial_access_key/iam_user_no_setup_initial_access_key.metadata.json @@ -1,46 +1,38 @@ { - "Provider": "aws", - "CheckID": "iam_user_no_setup_initial_access_key", - "CheckTitle": "Do not setup access keys during initial user setup for all IAM users that have a console password", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], - "ServiceName": "iam", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsIamUser", - "Description": "Do not setup access keys during initial user setup for all IAM users that have a console password", - "Risk": "AWS console defaults the checkbox for creating access keys to enabled. This results in many access keys being generated unnecessarily. In addition to unnecessary credentials; it also generates unnecessary management work in auditing and rotating these keys. 
Requiring that additional steps be taken by the user after their profile has been created will give a stronger indication of intent that access keys are (a) necessary for their work and (b) once the access key is established on an account that the keys may be in use somewhere in the organization.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "From the IAM console: generate credential report and disable not required keys.", - "Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html" - } + "Provider": "aws", + "CheckID": "iam_user_no_setup_initial_access_key", + "CheckTitle": "Do not setup access keys during initial user setup for all IAM users that have a console password", + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], + "ServiceName": "iam", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsIamUser", + "Description": "Do not setup access keys during initial user setup for all IAM users that have a console password", + "Risk": "AWS console defaults the checkbox for creating access keys to enabled. This results in many access keys being generated unnecessarily. In addition to unnecessary credentials; it also generates unnecessary management work in auditing and rotating these keys. Requiring that additional steps be taken by the user after their profile has been created will give a stronger indication of intent that access keys are (a) necessary for their work and (b) once the access key is established on an account that the keys may be in use somewhere in the organization.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "CAF Security Epic: IAM", - "Compliance": [ - { - "Control": [ - "1.21" - ], - "Framework": "CIS-AWS", - "Group": [ - "level1" - ], - "Version": "1.4" - } - ] - } + "Recommendation": { + "Text": "From the IAM console: generate credential report and disable not required keys.", + "Url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "CAF Security Epic: IAM" +} diff --git a/providers/aws/services/iam/iam_user_no_setup_initial_access_key/iam_user_no_setup_initial_access_key.py b/providers/aws/services/iam/iam_user_no_setup_initial_access_key/iam_user_no_setup_initial_access_key.py index 3c8e7e28..4737b660 100644 --- a/providers/aws/services/iam/iam_user_no_setup_initial_access_key/iam_user_no_setup_initial_access_key.py +++ b/providers/aws/services/iam/iam_user_no_setup_initial_access_key/iam_user_no_setup_initial_access_key.py @@ -20,7 +20,7 @@ class iam_user_no_setup_initial_access_key(Check): and user_record["access_key_1_last_used_date"] == "N/A" and user_record["password_enabled"] == "true" ): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = user_record["user"] report.resource_arn = user_record["arn"] @@ -34,7 +34,7 @@ class iam_user_no_setup_initial_access_key(Check): and 
user_record["access_key_2_last_used_date"] == "N/A" and user_record["password_enabled"] == "true" ): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = user_record["user"] report.resource_arn = user_record["arn"] @@ -44,7 +44,7 @@ class iam_user_no_setup_initial_access_key(Check): ) findings.append(report) else: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = iam_client.region report.resource_id = user_record["user"] report.resource_arn = user_record["arn"] diff --git a/providers/aws/services/iam/iam_user_two_active_access_key/iam_user_two_active_access_key.metadata.json b/providers/aws/services/iam/iam_user_two_active_access_key/iam_user_two_active_access_key.metadata.json index 5d4b068b..fdd21266 100644 --- a/providers/aws/services/iam/iam_user_two_active_access_key/iam_user_two_active_access_key.metadata.json +++ b/providers/aws/services/iam/iam_user_two_active_access_key/iam_user_two_active_access_key.metadata.json @@ -2,7 +2,11 @@ "Provider": "aws", "CheckID": "iam_user_two_active_access_key", "CheckTitle": "Check if IAM users have two active access keys", - "CheckType": ["Software and Configuration Checks", "Industry and Regulatory Standards" ,"CIS AWS Foundations Benchmark"], + "CheckType": [ + "Software and Configuration Checks", + "Industry and Regulatory Standards", + "CIS AWS Foundations Benchmark" + ], "ServiceName": "iam", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +34,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/iam/iam_user_two_active_access_key/iam_user_two_active_access_key.py b/providers/aws/services/iam/iam_user_two_active_access_key/iam_user_two_active_access_key.py index 99b95236..36ed3f7e 100644 --- a/providers/aws/services/iam/iam_user_two_active_access_key/iam_user_two_active_access_key.py +++ b/providers/aws/services/iam/iam_user_two_active_access_key/iam_user_two_active_access_key.py @@ -9,7 +9,7 @@ class iam_user_two_active_access_key(Check): findings = [] response = iam_client.credential_report for user in response: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = user["user"] report.resource_arn = user["arn"] report.region = iam_client.region diff --git a/providers/aws/services/kms/kms_cmk_are_used/kms_cmk_are_used.metadata.json b/providers/aws/services/kms/kms_cmk_are_used/kms_cmk_are_used.metadata.json index 0bbdcb88..92ea6716 100644 --- a/providers/aws/services/kms/kms_cmk_are_used/kms_cmk_are_used.metadata.json +++ b/providers/aws/services/kms/kms_cmk_are_used/kms_cmk_are_used.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "kms_cmk_are_used", "CheckTitle": "Check if there are CMK KMS keys not used.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "kms", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:kms:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/kms/kms_cmk_are_used/kms_cmk_are_used.py b/providers/aws/services/kms/kms_cmk_are_used/kms_cmk_are_used.py index b963f8ef..60240727 100644 --- a/providers/aws/services/kms/kms_cmk_are_used/kms_cmk_are_used.py +++ b/providers/aws/services/kms/kms_cmk_are_used/kms_cmk_are_used.py @@ 
-8,7 +8,7 @@ class kms_cmk_are_used(Check): for key in kms_client.keys: # Only check CMKs keys if key.manager == "CUSTOMER": - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = key.region report.resource_id = key.id report.resource_arn = key.arn diff --git a/providers/aws/services/kms/kms_cmk_rotation_enabled/kms_cmk_rotation_enabled.metadata.json b/providers/aws/services/kms/kms_cmk_rotation_enabled/kms_cmk_rotation_enabled.metadata.json index 1c126ea6..6cbc51dd 100644 --- a/providers/aws/services/kms/kms_cmk_rotation_enabled/kms_cmk_rotation_enabled.metadata.json +++ b/providers/aws/services/kms/kms_cmk_rotation_enabled/kms_cmk_rotation_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "kms_cmk_rotation_enabled", "CheckTitle": "Ensure rotation for customer created KMS CMKs is enabled.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "kms", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:kms:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/kms/kms_cmk_rotation_enabled/kms_cmk_rotation_enabled.py b/providers/aws/services/kms/kms_cmk_rotation_enabled/kms_cmk_rotation_enabled.py index d695417f..8b9138a4 100644 --- a/providers/aws/services/kms/kms_cmk_rotation_enabled/kms_cmk_rotation_enabled.py +++ b/providers/aws/services/kms/kms_cmk_rotation_enabled/kms_cmk_rotation_enabled.py @@ -6,7 +6,7 @@ class kms_cmk_rotation_enabled(Check): def execute(self): findings = [] for key in kms_client.keys: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = key.region # Only check enabled CMKs keys if key.manager == "CUSTOMER" and key.state == "Enabled": diff --git a/providers/aws/services/kms/kms_key_not_publicly_accessible/kms_key_not_publicly_accessible.metadata.json b/providers/aws/services/kms/kms_key_not_publicly_accessible/kms_key_not_publicly_accessible.metadata.json index 87863691..ace7caf4 100644 --- a/providers/aws/services/kms/kms_key_not_publicly_accessible/kms_key_not_publicly_accessible.metadata.json +++ b/providers/aws/services/kms/kms_key_not_publicly_accessible/kms_key_not_publicly_accessible.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "kms_key_not_publicly_accessible", "CheckTitle": "Check exposed KMS keys", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "kms", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:kms:region:account-id:certificate/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/kms/kms_key_not_publicly_accessible/kms_key_not_publicly_accessible.py b/providers/aws/services/kms/kms_key_not_publicly_accessible/kms_key_not_publicly_accessible.py index 74845d83..96a65ed5 100644 --- a/providers/aws/services/kms/kms_key_not_publicly_accessible/kms_key_not_publicly_accessible.py +++ b/providers/aws/services/kms/kms_key_not_publicly_accessible/kms_key_not_publicly_accessible.py @@ -9,7 +9,7 @@ class kms_key_not_publicly_accessible(Check): if ( key.manager == "CUSTOMER" and key.state == "Enabled" ): # only customer KMS have policies - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "PASS" report.status_extended = f"KMS key {key.id} is not exposed to Public." 
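
Each Python hunk applies the same one-line change: the report is now constructed from the value returned by calling self.metadata() rather than from the self.metadata attribute itself. A minimal sketch of the check shape these hunks converge on, with a hypothetical check name and an assumed import path for the base classes:

    from lib.check.models import Check, Check_Report  # import path assumed from the repo layout
    from providers.aws.services.iam.iam_client import iam_client

    class iam_example_password_policy_check(Check):  # hypothetical check, for illustration only
        def execute(self) -> Check_Report:
            findings = []
            # metadata is now obtained by calling the method, as in the hunks above
            report = Check_Report(self.metadata())
            report.region = iam_client.region
            report.resource_id = "password_policy"
            report.status = "PASS"
            report.status_extended = "Example password policy condition is satisfied"
            findings.append(report)
            return findings
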
report.resource_id = key.id diff --git a/providers/aws/services/macie/macie_is_enabled/macie_is_enabled.metadata.json b/providers/aws/services/macie/macie_is_enabled/macie_is_enabled.metadata.json index 81c27693..65d7d8bd 100644 --- a/providers/aws/services/macie/macie_is_enabled/macie_is_enabled.metadata.json +++ b/providers/aws/services/macie/macie_is_enabled/macie_is_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "macie_is_enabled", "CheckTitle": "Check if Amazon Macie is enabled.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "macie", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:access-analyzer:region:account-id:analyzer/resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/macie/macie_is_enabled/macie_is_enabled.py b/providers/aws/services/macie/macie_is_enabled/macie_is_enabled.py index 1130855e..f620f708 100644 --- a/providers/aws/services/macie/macie_is_enabled/macie_is_enabled.py +++ b/providers/aws/services/macie/macie_is_enabled/macie_is_enabled.py @@ -6,7 +6,7 @@ class macie_is_enabled(Check): def execute(self): findings = [] for session in macie_client.sessions: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = session.region report.resource_id = "Macie" if session.status == "ENABLED": diff --git a/providers/aws/services/opensearch/opensearch_service_domains_audit_logging_enabled/opensearch_service_domains_audit_logging_enabled.metadata.json b/providers/aws/services/opensearch/opensearch_service_domains_audit_logging_enabled/opensearch_service_domains_audit_logging_enabled.metadata.json index fdfa1644..48f9f6cb 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_audit_logging_enabled/opensearch_service_domains_audit_logging_enabled.metadata.json +++ b/providers/aws/services/opensearch/opensearch_service_domains_audit_logging_enabled/opensearch_service_domains_audit_logging_enabled.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "opensearch_service_domains_audit_logging_enabled", - "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have audit logging enabled", - "CheckType": ["Identify", "Logging"], - "ServiceName": "opensearch", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "low", - "ResourceType": "AwsOpenSearchDomain", - "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have audit logging enabled", - "Risk": "If logs are not enabled; monitoring of service use and threat analysis is not possible.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Make sure you are logging information about Amazon Elasticsearch Service operations.", - "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/audit-logs.html" - } + "Provider": "aws", + "CheckID": "opensearch_service_domains_audit_logging_enabled", + "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have audit logging enabled", + "CheckType": [ + "Identify", + "Logging" + ], + "ServiceName": "opensearch", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "low", + "ResourceType": "AwsOpenSearchDomain", + "Description": "Check if Amazon 
Elasticsearch/Opensearch Service domains have audit logging enabled", + "Risk": "If logs are not enabled; monitoring of service use and threat analysis is not possible.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Make sure you are logging information about Amazon Elasticsearch Service operations.", + "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/audit-logs.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/opensearch/opensearch_service_domains_audit_logging_enabled/opensearch_service_domains_audit_logging_enabled.py b/providers/aws/services/opensearch/opensearch_service_domains_audit_logging_enabled/opensearch_service_domains_audit_logging_enabled.py index 5b0b12d1..7fafa516 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_audit_logging_enabled/opensearch_service_domains_audit_logging_enabled.py +++ b/providers/aws/services/opensearch/opensearch_service_domains_audit_logging_enabled/opensearch_service_domains_audit_logging_enabled.py @@ -6,7 +6,7 @@ class opensearch_service_domains_audit_logging_enabled(Check): def execute(self): findings = [] for domain in opensearch_client.opensearch_domains: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = domain.region report.resource_id = domain.name report.resource_arn = domain.arn diff --git a/providers/aws/services/opensearch/opensearch_service_domains_cloudwatch_logging_enabled/opensearch_service_domains_cloudwatch_logging_enabled.metadata.json b/providers/aws/services/opensearch/opensearch_service_domains_cloudwatch_logging_enabled/opensearch_service_domains_cloudwatch_logging_enabled.metadata.json index bcab23b4..626e3035 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_cloudwatch_logging_enabled/opensearch_service_domains_cloudwatch_logging_enabled.metadata.json +++ b/providers/aws/services/opensearch/opensearch_service_domains_cloudwatch_logging_enabled/opensearch_service_domains_cloudwatch_logging_enabled.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "opensearch_service_domains_cloudwatch_logging_enabled", - "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have logging enabled", - "CheckType": ["Identify", "Logging"], - "ServiceName": "opensearch", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsOpenSearchDomain", - "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have logging enabled", - "Risk": "Amazon ES exposes four Elasticsearch/Opensearch logs through Amazon CloudWatch Logs: error logs; search slow logs; index slow logs; and audit logs.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "https://docs.bridgecrew.io/docs/elasticsearch_7#cli-command", - "NativeIaC": "https://docs.bridgecrew.io/docs/elasticsearch_7#cloudformation", - "Other": "https://docs.bridgecrew.io/docs/elasticsearch_7#fix---runtime", - "Terraform": "https://docs.bridgecrew.io/docs/elasticsearch_7#fix---buildtime" - }, - "Recommendation": { - "Text": "Enable 
Elasticsearch/Opensearch log. Create use cases for them. Using audit logs check for access denied events.", - "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createdomain-configure-slow-logs.html" - } + "Provider": "aws", + "CheckID": "opensearch_service_domains_cloudwatch_logging_enabled", + "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have logging enabled", + "CheckType": [ + "Identify", + "Logging" + ], + "ServiceName": "opensearch", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsOpenSearchDomain", + "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have logging enabled", + "Risk": "Amazon ES exposes four Elasticsearch/Opensearch logs through Amazon CloudWatch Logs: error logs; search slow logs; index slow logs; and audit logs.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "https://docs.bridgecrew.io/docs/elasticsearch_7#cli-command", + "NativeIaC": "https://docs.bridgecrew.io/docs/elasticsearch_7#cloudformation", + "Other": "https://docs.bridgecrew.io/docs/elasticsearch_7#fix---runtime", + "Terraform": "https://docs.bridgecrew.io/docs/elasticsearch_7#fix---buildtime" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable Elasticsearch/Opensearch log. Create use cases for them. Using audit logs check for access denied events.", + "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createdomain-configure-slow-logs.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/opensearch/opensearch_service_domains_cloudwatch_logging_enabled/opensearch_service_domains_cloudwatch_logging_enabled.py b/providers/aws/services/opensearch/opensearch_service_domains_cloudwatch_logging_enabled/opensearch_service_domains_cloudwatch_logging_enabled.py index cf411084..03bada40 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_cloudwatch_logging_enabled/opensearch_service_domains_cloudwatch_logging_enabled.py +++ b/providers/aws/services/opensearch/opensearch_service_domains_cloudwatch_logging_enabled/opensearch_service_domains_cloudwatch_logging_enabled.py @@ -6,7 +6,7 @@ class opensearch_service_domains_cloudwatch_logging_enabled(Check): def execute(self): findings = [] for domain in opensearch_client.opensearch_domains: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = domain.region report.resource_id = domain.name report.resource_arn = domain.arn diff --git a/providers/aws/services/opensearch/opensearch_service_domains_encryption_at_rest_enabled/opensearch_service_domains_encryption_at_rest_enabled.metadata.json b/providers/aws/services/opensearch/opensearch_service_domains_encryption_at_rest_enabled/opensearch_service_domains_encryption_at_rest_enabled.metadata.json index 5ea54c83..41421d53 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_encryption_at_rest_enabled/opensearch_service_domains_encryption_at_rest_enabled.metadata.json +++ b/providers/aws/services/opensearch/opensearch_service_domains_encryption_at_rest_enabled/opensearch_service_domains_encryption_at_rest_enabled.metadata.json @@ -1,35 +1,38 @@ { - 
"Provider": "aws", - "CheckID": "opensearch_service_domains_encryption_at_rest_enabled", - "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have encryption at-rest enabled", - "CheckType": ["Protect", "Data protection", "Encryption of data at rest"], - "ServiceName": "opensearch", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsOpenSearchDomain", - "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have encryption at-rest enabled", - "Risk": "If not enable unauthorized access to your data could risk increases.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/encryption-at-rest.html", - "NativeIaC": "https://docs.bridgecrew.io/docs/elasticsearch_3-enable-encryptionatrest#fix---builtime", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/encryption-at-rest.html", - "Terraform": "" - }, - "Recommendation": { - "Text": "Enable encryption at rest using AWS KMS to store and manage your encryption keys and the Advanced Encryption Standard algorithm with 256-bit keys (AES-256) to perform the encryption.", - "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/encryption-at-rest.html" - } + "Provider": "aws", + "CheckID": "opensearch_service_domains_encryption_at_rest_enabled", + "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have encryption at-rest enabled", + "CheckType": [ + "Protect", + "Data protection", + "Encryption of data at rest" + ], + "ServiceName": "opensearch", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsOpenSearchDomain", + "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have encryption at-rest enabled", + "Risk": "If not enable unauthorized access to your data could risk increases.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/encryption-at-rest.html", + "NativeIaC": "https://docs.bridgecrew.io/docs/elasticsearch_3-enable-encryptionatrest#fix---builtime", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/encryption-at-rest.html", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable encryption at rest using AWS KMS to store and manage your encryption keys and the Advanced Encryption Standard algorithm with 256-bit keys (AES-256) to perform the encryption.", + "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/encryption-at-rest.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/opensearch/opensearch_service_domains_encryption_at_rest_enabled/opensearch_service_domains_encryption_at_rest_enabled.py b/providers/aws/services/opensearch/opensearch_service_domains_encryption_at_rest_enabled/opensearch_service_domains_encryption_at_rest_enabled.py index 0984d7a5..d390619c 100644 --- 
a/providers/aws/services/opensearch/opensearch_service_domains_encryption_at_rest_enabled/opensearch_service_domains_encryption_at_rest_enabled.py +++ b/providers/aws/services/opensearch/opensearch_service_domains_encryption_at_rest_enabled/opensearch_service_domains_encryption_at_rest_enabled.py @@ -6,7 +6,7 @@ class opensearch_service_domains_encryption_at_rest_enabled(Check): def execute(self): findings = [] for domain in opensearch_client.opensearch_domains: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = domain.region report.resource_id = domain.name report.resource_arn = domain.arn diff --git a/providers/aws/services/opensearch/opensearch_service_domains_https_communications_enforced/opensearch_service_domains_https_communications_enforced.metadata.json b/providers/aws/services/opensearch/opensearch_service_domains_https_communications_enforced/opensearch_service_domains_https_communications_enforced.metadata.json index 16644b33..0efd174f 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_https_communications_enforced/opensearch_service_domains_https_communications_enforced.metadata.json +++ b/providers/aws/services/opensearch/opensearch_service_domains_https_communications_enforced/opensearch_service_domains_https_communications_enforced.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "opensearch_service_domains_https_communications_enforced", - "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have enforce HTTPS enabled", - "CheckType": ["Protect", "Data protection", "Encryption of data in transit"], - "ServiceName": "opensearch", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsOpenSearchDomain", - "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have enforce HTTPS enabled", - "Risk": "If not enable unauthorized access to your data could risk increases.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "https://docs.bridgecrew.io/docs/elasticsearch_6#fix---builtime", - "Other": "https://docs.bridgecrew.io/docs/elasticsearch_6#aws-console", - "Terraform": "" - }, - "Recommendation": { - "Text": "When creating ES Domains; enable 'Require HTTPS fo all traffic to the domain'", - "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html" - } + "Provider": "aws", + "CheckID": "opensearch_service_domains_https_communications_enforced", + "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have enforce HTTPS enabled", + "CheckType": [ + "Protect", + "Data protection", + "Encryption of data in transit" + ], + "ServiceName": "opensearch", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsOpenSearchDomain", + "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have enforce HTTPS enabled", + "Risk": "If not enable unauthorized access to your data could risk increases.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "https://docs.bridgecrew.io/docs/elasticsearch_6#fix---builtime", + "Other": "https://docs.bridgecrew.io/docs/elasticsearch_6#aws-console", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - 
"Compliance": [] - } + "Recommendation": { + "Text": "When creating ES Domains; enable 'Require HTTPS fo all traffic to the domain'", + "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/opensearch/opensearch_service_domains_https_communications_enforced/opensearch_service_domains_https_communications_enforced.py b/providers/aws/services/opensearch/opensearch_service_domains_https_communications_enforced/opensearch_service_domains_https_communications_enforced.py index 17e85958..b8548630 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_https_communications_enforced/opensearch_service_domains_https_communications_enforced.py +++ b/providers/aws/services/opensearch/opensearch_service_domains_https_communications_enforced/opensearch_service_domains_https_communications_enforced.py @@ -6,7 +6,7 @@ class opensearch_service_domains_https_communications_enforced(Check): def execute(self): findings = [] for domain in opensearch_client.opensearch_domains: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = domain.region report.resource_id = domain.name report.resource_arn = domain.arn diff --git a/providers/aws/services/opensearch/opensearch_service_domains_internal_user_database_enabled/opensearch_service_domains_internal_user_database_enabled.metadata.json b/providers/aws/services/opensearch/opensearch_service_domains_internal_user_database_enabled/opensearch_service_domains_internal_user_database_enabled.metadata.json index 75831710..8db19467 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_internal_user_database_enabled/opensearch_service_domains_internal_user_database_enabled.metadata.json +++ b/providers/aws/services/opensearch/opensearch_service_domains_internal_user_database_enabled/opensearch_service_domains_internal_user_database_enabled.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "opensearch_service_domains_internal_user_database_enabled", - "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have internal user database enabled", - "CheckType": ["Protect", "Data protection"], - "ServiceName": "opensearch", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsOpenSearchDomain", - "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have internal user database enabled", - "Risk": "Internal User Database is convenient for demos; for production environment use Federated authentication.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Remove users from internal user database and uso Cognito instead.", - "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/fgac.html" - } + "Provider": "aws", + "CheckID": "opensearch_service_domains_internal_user_database_enabled", + "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have internal user database enabled", + "CheckType": [ + "Protect", + "Data protection" + ], + "ServiceName": "opensearch", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": 
"medium", + "ResourceType": "AwsOpenSearchDomain", + "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have internal user database enabled", + "Risk": "Internal User Database is convenient for demos; for production environment use Federated authentication.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Remove users from internal user database and uso Cognito instead.", + "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/fgac.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/opensearch/opensearch_service_domains_internal_user_database_enabled/opensearch_service_domains_internal_user_database_enabled.py b/providers/aws/services/opensearch/opensearch_service_domains_internal_user_database_enabled/opensearch_service_domains_internal_user_database_enabled.py index e2571913..c209e0e5 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_internal_user_database_enabled/opensearch_service_domains_internal_user_database_enabled.py +++ b/providers/aws/services/opensearch/opensearch_service_domains_internal_user_database_enabled/opensearch_service_domains_internal_user_database_enabled.py @@ -6,7 +6,7 @@ class opensearch_service_domains_internal_user_database_enabled(Check): def execute(self): findings = [] for domain in opensearch_client.opensearch_domains: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = domain.region report.resource_id = domain.name report.resource_arn = domain.arn diff --git a/providers/aws/services/opensearch/opensearch_service_domains_node_to_node_encryption_enabled/opensearch_service_domains_node_to_node_encryption_enabled.metadata.json b/providers/aws/services/opensearch/opensearch_service_domains_node_to_node_encryption_enabled/opensearch_service_domains_node_to_node_encryption_enabled.metadata.json index ff576140..ce46b757 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_node_to_node_encryption_enabled/opensearch_service_domains_node_to_node_encryption_enabled.metadata.json +++ b/providers/aws/services/opensearch/opensearch_service_domains_node_to_node_encryption_enabled/opensearch_service_domains_node_to_node_encryption_enabled.metadata.json @@ -1,35 +1,38 @@ { - "Provider": "aws", - "CheckID": "opensearch_service_domains_node_to_node_encryption_enabled", - "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have node-to-node encryption enabled", - "CheckType": ["Protect", "Data protection", "Encryption of data in transit"], - "ServiceName": "opensearch", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "medium", - "ResourceType": "AwsOpenSearchDomain", - "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have node-to-node encryption enabled", - "Risk": "Node-to-node encryption provides an additional layer of security on top of the default features of Amazon ES. 
This architecture prevents potential attackers from intercepting traffic between Elasticsearch nodes and keeps the cluster secure.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/node-to-node-encryption.html", - "NativeIaC": "https://docs.bridgecrew.io/docs/elasticsearch_5#fix---builtime", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/node-to-node-encryption.html", - "Terraform": "" - }, - "Recommendation": { - "Text": "Node-to-node encryption on new domains requires Elasticsearch 6.0 or later. Enabling the feature on existing domains requires Elasticsearch 6.7 or later. Choose the existing domain in the AWS console; Actions; and Modify encryption.", - "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/ntn.html" - } + "Provider": "aws", + "CheckID": "opensearch_service_domains_node_to_node_encryption_enabled", + "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have node-to-node encryption enabled", + "CheckType": [ + "Protect", + "Data protection", + "Encryption of data in transit" + ], + "ServiceName": "opensearch", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "medium", + "ResourceType": "AwsOpenSearchDomain", + "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have node-to-node encryption enabled", + "Risk": "Node-to-node encryption provides an additional layer of security on top of the default features of Amazon ES. This architecture prevents potential attackers from intercepting traffic between Elasticsearch nodes and keeps the cluster secure.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/node-to-node-encryption.html", + "NativeIaC": "https://docs.bridgecrew.io/docs/elasticsearch_5#fix---builtime", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/node-to-node-encryption.html", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Node-to-node encryption on new domains requires Elasticsearch 6.0 or later. Enabling the feature on existing domains requires Elasticsearch 6.7 or later. 
Choose the existing domain in the AWS console; Actions; and Modify encryption.", + "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/ntn.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/opensearch/opensearch_service_domains_node_to_node_encryption_enabled/opensearch_service_domains_node_to_node_encryption_enabled.py b/providers/aws/services/opensearch/opensearch_service_domains_node_to_node_encryption_enabled/opensearch_service_domains_node_to_node_encryption_enabled.py index 6a66e19e..dbb27385 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_node_to_node_encryption_enabled/opensearch_service_domains_node_to_node_encryption_enabled.py +++ b/providers/aws/services/opensearch/opensearch_service_domains_node_to_node_encryption_enabled/opensearch_service_domains_node_to_node_encryption_enabled.py @@ -6,7 +6,7 @@ class opensearch_service_domains_node_to_node_encryption_enabled(Check): def execute(self): findings = [] for domain in opensearch_client.opensearch_domains: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = domain.region report.resource_id = domain.name report.resource_arn = domain.arn diff --git a/providers/aws/services/opensearch/opensearch_service_domains_not_publicly_accessible/opensearch_service_domains_not_publicly_accessible.metadata.json b/providers/aws/services/opensearch/opensearch_service_domains_not_publicly_accessible/opensearch_service_domains_not_publicly_accessible.metadata.json index 3660e55e..b4566e55 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_not_publicly_accessible/opensearch_service_domains_not_publicly_accessible.metadata.json +++ b/providers/aws/services/opensearch/opensearch_service_domains_not_publicly_accessible/opensearch_service_domains_not_publicly_accessible.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "opensearch_service_domains_not_publicly_accessible", - "CheckTitle": "Check if Amazon Opensearch/Elasticsearch domains are set as Public or if it has open policy access", - "CheckType": ["Protect", "Secure Access Management"], - "ServiceName": "opensearch", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "critical", - "ResourceType": "AwsOpenSearchDomain", - "Description": "Check if Amazon Opensearch/Elasticsearch domains are set as Public or if it has open policy access", - "Risk": "Publicly accessible services could expose sensitive data to bad actors.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/elasticsearch-domain-exposed.html", - "NativeIaC": "", - "Other": "https://docs.bridgecrew.io/docs/public_3#fix---runtime", - "Terraform": "" - }, - "Recommendation": { - "Text": "Use VPC endpoints for internal services.", - "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html" - } + "Provider": "aws", + "CheckID": "opensearch_service_domains_not_publicly_accessible", + "CheckTitle": "Check if Amazon Opensearch/Elasticsearch domains are set as Public or if it has open policy access", + "CheckType": [ + "Protect", + "Secure Access Management" + ], + "ServiceName": "opensearch", + "SubServiceName": "", + "ResourceIdTemplate": 
"arn:partition:service:region:account-id:resource-id", + "Severity": "critical", + "ResourceType": "AwsOpenSearchDomain", + "Description": "Check if Amazon Opensearch/Elasticsearch domains are set as Public or if it has open policy access", + "Risk": "Publicly accessible services could expose sensitive data to bad actors.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/elasticsearch-domain-exposed.html", + "NativeIaC": "", + "Other": "https://docs.bridgecrew.io/docs/public_3#fix---runtime", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Use VPC endpoints for internal services.", + "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/opensearch/opensearch_service_domains_not_publicly_accessible/opensearch_service_domains_not_publicly_accessible.py b/providers/aws/services/opensearch/opensearch_service_domains_not_publicly_accessible/opensearch_service_domains_not_publicly_accessible.py index e6b311d0..17e5316b 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_not_publicly_accessible/opensearch_service_domains_not_publicly_accessible.py +++ b/providers/aws/services/opensearch/opensearch_service_domains_not_publicly_accessible/opensearch_service_domains_not_publicly_accessible.py @@ -6,7 +6,7 @@ class opensearch_service_domains_not_publicly_accessible(Check): def execute(self): findings = [] for domain in opensearch_client.opensearch_domains: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = domain.region report.resource_id = domain.name report.resource_arn = domain.arn diff --git a/providers/aws/services/opensearch/opensearch_service_domains_updated_to_the_latest_service_software_version/opensearch_service_domains_updated_to_the_latest_service_software_version.metadata.json b/providers/aws/services/opensearch/opensearch_service_domains_updated_to_the_latest_service_software_version/opensearch_service_domains_updated_to_the_latest_service_software_version.metadata.json index 3daef2d4..181d4cfa 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_updated_to_the_latest_service_software_version/opensearch_service_domains_updated_to_the_latest_service_software_version.metadata.json +++ b/providers/aws/services/opensearch/opensearch_service_domains_updated_to_the_latest_service_software_version/opensearch_service_domains_updated_to_the_latest_service_software_version.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "opensearch_service_domains_updated_to_the_latest_service_software_version", - "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have updates available", - "CheckType": ["Detect", "Vulnerability, patch, and version management"], - "ServiceName": "opensearch", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "low", - "ResourceType": "AwsOpenSearchDomain", - "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have updates available", - "Risk": "Amazon ES regularly releases system software updates that 
add features or otherwise improve your domains.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/version.html", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/version.html", - "Terraform": "" - }, - "Recommendation": { - "Text": "The Notifications panel in the console is the easiest way to see if an update is available or check the status of an update. You can also receive these notifications through Amazon EventBridge. If you take no action on required updates; Amazon ES still updates your domain service software automatically after a certain timeframe (typically two weeks). In this situation; Amazon ES sends notifications when it starts the update and when the update is complete.", - "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-service-software.html" - } + "Provider": "aws", + "CheckID": "opensearch_service_domains_updated_to_the_latest_service_software_version", + "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains have updates available", + "CheckType": [ + "Detect", + "Vulnerability, patch, and version management" + ], + "ServiceName": "opensearch", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "low", + "ResourceType": "AwsOpenSearchDomain", + "Description": "Check if Amazon Elasticsearch/Opensearch Service domains have updates available", + "Risk": "Amazon ES regularly releases system software updates that add features or otherwise improve your domains.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/version.html", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Elasticsearch/version.html", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "The Notifications panel in the console is the easiest way to see if an update is available or check the status of an update. You can also receive these notifications through Amazon EventBridge. If you take no action on required updates; Amazon ES still updates your domain service software automatically after a certain timeframe (typically two weeks). 
In this situation; Amazon ES sends notifications when it starts the update and when the update is complete.", + "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-service-software.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/opensearch/opensearch_service_domains_updated_to_the_latest_service_software_version/opensearch_service_domains_updated_to_the_latest_service_software_version.py b/providers/aws/services/opensearch/opensearch_service_domains_updated_to_the_latest_service_software_version/opensearch_service_domains_updated_to_the_latest_service_software_version.py index 3fef4fee..a1b08235 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_updated_to_the_latest_service_software_version/opensearch_service_domains_updated_to_the_latest_service_software_version.py +++ b/providers/aws/services/opensearch/opensearch_service_domains_updated_to_the_latest_service_software_version/opensearch_service_domains_updated_to_the_latest_service_software_version.py @@ -6,7 +6,7 @@ class opensearch_service_domains_updated_to_the_latest_service_software_version( def execute(self): findings = [] for domain in opensearch_client.opensearch_domains: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = domain.region report.resource_id = domain.name report.resource_arn = domain.arn diff --git a/providers/aws/services/opensearch/opensearch_service_domains_use_cognito_authentication_for_kibana/opensearch_service_domains_use_cognito_authentication_for_kibana.metadata.json b/providers/aws/services/opensearch/opensearch_service_domains_use_cognito_authentication_for_kibana/opensearch_service_domains_use_cognito_authentication_for_kibana.metadata.json index 5ab6e144..0415512c 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_use_cognito_authentication_for_kibana/opensearch_service_domains_use_cognito_authentication_for_kibana.metadata.json +++ b/providers/aws/services/opensearch/opensearch_service_domains_use_cognito_authentication_for_kibana/opensearch_service_domains_use_cognito_authentication_for_kibana.metadata.json @@ -1,35 +1,37 @@ { - "Provider": "aws", - "CheckID": "opensearch_service_domains_use_cognito_authentication_for_kibana", - "CheckTitle": "Check if Amazon Elasticsearch/Opensearch Service domains has Amazon Cognito authentication for Kibana enabled", - "CheckType": ["Identify", "Logging"], - "ServiceName": "opensearch", - "SubServiceName": "", - "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", - "Severity": "high", - "ResourceType": "AwsOpenSearchDomain", - "Description": "Check if Amazon Elasticsearch/Opensearch Service domains has Amazon Cognito authentication for Kibana enabled", - "Risk": "Amazon Elasticsearch Service supports Amazon Cognito for Kibana authentication.", - "RelatedUrl": "", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "If you do not configure Amazon Cognito authentication; you can still protect Kibana using an IP-based access policy and a proxy server; HTTP basic authentication; or SAML.", - "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html" - } + "Provider": "aws", + "CheckID": "opensearch_service_domains_use_cognito_authentication_for_kibana", + "CheckTitle": "Check if 
Amazon Elasticsearch/Opensearch Service domains has Amazon Cognito authentication for Kibana enabled", + "CheckType": [ + "Identify", + "Logging" + ], + "ServiceName": "opensearch", + "SubServiceName": "", + "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", + "Severity": "high", + "ResourceType": "AwsOpenSearchDomain", + "Description": "Check if Amazon Elasticsearch/Opensearch Service domains has Amazon Cognito authentication for Kibana enabled", + "Risk": "Amazon Elasticsearch Service supports Amazon Cognito for Kibana authentication.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "If you do not configure Amazon Cognito authentication; you can still protect Kibana using an IP-based access policy and a proxy server; HTTP basic authentication; or SAML.", + "Url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/opensearch/opensearch_service_domains_use_cognito_authentication_for_kibana/opensearch_service_domains_use_cognito_authentication_for_kibana.py b/providers/aws/services/opensearch/opensearch_service_domains_use_cognito_authentication_for_kibana/opensearch_service_domains_use_cognito_authentication_for_kibana.py index 3c75831b..aefe01ad 100644 --- a/providers/aws/services/opensearch/opensearch_service_domains_use_cognito_authentication_for_kibana/opensearch_service_domains_use_cognito_authentication_for_kibana.py +++ b/providers/aws/services/opensearch/opensearch_service_domains_use_cognito_authentication_for_kibana/opensearch_service_domains_use_cognito_authentication_for_kibana.py @@ -6,7 +6,7 @@ class opensearch_service_domains_use_cognito_authentication_for_kibana(Check): def execute(self): findings = [] for domain in opensearch_client.opensearch_domains: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = domain.region report.resource_id = domain.name report.resource_arn = domain.arn diff --git a/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.metadata.json b/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.metadata.json index 2a607a48..1ea290fd 100644 --- a/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.metadata.json +++ b/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.py b/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.py index c3f86a0b..4e2aae9c 100644 --- a/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.py +++ b/providers/aws/services/rds/rds_instance_backup_enabled/rds_instance_backup_enabled.py @@ -6,7 +6,7 @@ class rds_instance_backup_enabled(Check): def execute(self): findings = [] for db_instance in rds_client.db_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) 
report.region = db_instance.region report.resource_id = db_instance.id if db_instance.backup_retention_period > 0: diff --git a/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.metadata.json b/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.metadata.json index 997b0094..fac7830e 100644 --- a/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.metadata.json +++ b/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.py b/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.py index 02eefd78..3df4db5c 100644 --- a/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.py +++ b/providers/aws/services/rds/rds_instance_deletion_protection/rds_instance_deletion_protection.py @@ -6,7 +6,7 @@ class rds_instance_deletion_protection(Check): def execute(self): findings = [] for db_instance in rds_client.db_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = db_instance.region report.resource_id = db_instance.id if db_instance.deletion_protection: diff --git a/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.metadata.json b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.metadata.json index 7b4b26f1..9ba52aa9 100644 --- a/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.metadata.json +++ b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.py b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.py index 45b9c83e..7fdf03ea 100644 --- a/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.py +++ b/providers/aws/services/rds/rds_instance_enhanced_monitoring_enabled/rds_instance_enhanced_monitoring_enabled.py @@ -6,7 +6,7 @@ class rds_instance_enhanced_monitoring_enabled(Check): def execute(self): findings = [] for db_instance in rds_client.db_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = db_instance.region report.resource_id = db_instance.id if db_instance.enhanced_monitoring_arn: diff --git a/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.metadata.json b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.metadata.json index c02facbc..d2ce8baa 100644 --- a/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.metadata.json +++ b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.metadata.json @@ -30,6 +30,5 @@ }, 
"DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.py b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.py index 4db543ab..e4af35a6 100644 --- a/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.py +++ b/providers/aws/services/rds/rds_instance_integration_cloudwatch_logs/rds_instance_integration_cloudwatch_logs.py @@ -6,7 +6,7 @@ class rds_instance_integration_cloudwatch_logs(Check): def execute(self): findings = [] for db_instance in rds_client.db_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = db_instance.region report.resource_id = db_instance.id if db_instance.cloudwatch_logs: diff --git a/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.metadata.json b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.metadata.json index 8bf46ab8..2f68ed2b 100644 --- a/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.metadata.json +++ b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.py b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.py index 4c22163b..449e98fe 100644 --- a/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.py +++ b/providers/aws/services/rds/rds_instance_minor_version_upgrade_enabled/rds_instance_minor_version_upgrade_enabled.py @@ -6,7 +6,7 @@ class rds_instance_minor_version_upgrade_enabled(Check): def execute(self): findings = [] for db_instance in rds_client.db_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = db_instance.region report.resource_id = db_instance.id if db_instance.auto_minor_version_upgrade: diff --git a/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.metadata.json b/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.metadata.json index 12f5ccb5..58b5183e 100644 --- a/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.metadata.json +++ b/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.py b/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.py index 0b77f0c2..b136a17f 100644 --- a/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.py +++ b/providers/aws/services/rds/rds_instance_multi_az/rds_instance_multi_az.py @@ -6,7 +6,7 @@ class rds_instance_multi_az(Check): def execute(self): findings = [] for db_instance in rds_client.db_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = 
db_instance.region report.resource_id = db_instance.id if db_instance.multi_az: diff --git a/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.metadata.json b/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.metadata.json index 57a76214..221e02c3 100644 --- a/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.metadata.json +++ b/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.py b/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.py index b1113bea..bd046ef9 100644 --- a/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.py +++ b/providers/aws/services/rds/rds_instance_no_public_access/rds_instance_no_public_access.py @@ -6,7 +6,7 @@ class rds_instance_no_public_access(Check): def execute(self): findings = [] for db_instance in rds_client.db_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = db_instance.region report.resource_id = db_instance.id if not db_instance.public: diff --git a/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.metadata.json b/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.metadata.json index 17b7933c..62ee4616 100644 --- a/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.metadata.json +++ b/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.py b/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.py index 7cea76cc..68fe0ebb 100644 --- a/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.py +++ b/providers/aws/services/rds/rds_instance_storage_encrypted/rds_instance_storage_encrypted.py @@ -6,7 +6,7 @@ class rds_instance_storage_encrypted(Check): def execute(self): findings = [] for db_instance in rds_client.db_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = db_instance.region report.resource_id = db_instance.id if db_instance.encrypted: diff --git a/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.metadata.json b/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.metadata.json index 13fd6f4f..44803ffb 100644 --- a/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.metadata.json +++ b/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.py b/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.py index 362ea3af..1d0d089d 100644 --- 
a/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.py +++ b/providers/aws/services/rds/rds_snapshots_public_access/rds_snapshots_public_access.py @@ -6,7 +6,7 @@ class rds_snapshots_public_access(Check): def execute(self): findings = [] for db_snap in rds_client.db_snapshots: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = db_snap.region report.resource_id = db_snap.id if db_snap.public: @@ -23,7 +23,7 @@ class rds_snapshots_public_access(Check): findings.append(report) for db_snap in rds_client.db_cluster_snapshots: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = db_snap.region report.resource_id = db_snap.id if db_snap.public: diff --git a/providers/aws/services/redshift/redshift_cluster_audit_logging/redshift_cluster_audit_logging.metadata.json b/providers/aws/services/redshift/redshift_cluster_audit_logging/redshift_cluster_audit_logging.metadata.json index b572a698..6d8df8fb 100644 --- a/providers/aws/services/redshift/redshift_cluster_audit_logging/redshift_cluster_audit_logging.metadata.json +++ b/providers/aws/services/redshift/redshift_cluster_audit_logging/redshift_cluster_audit_logging.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "redshift_cluster_audit_logging", - "CheckTitle": "Check if Redshift cluster has audit logging enabled", - "CheckType": [], - "ServiceName": "redshift", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name", - "Severity": "medium", - "ResourceType": "AwsRedshiftCluster", - "Description": "Check if Redshift cluster has audit logging enabled", - "Risk": "If logs are not enabled; monitoring of service use and threat analysis is not possible.", - "RelatedUrl": "https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Redshift/redshift-cluster-audit-logging-enabled.html", - "NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_logging_12#cloudformation", - "Other": "https://docs.bridgecrew.io/docs/bc_aws_logging_12#aws-console", - "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_logging_12#terraform" - }, - "Recommendation": { - "Text": "Enable logs. Create an S3 lifecycle policy. 
Define use cases, metrics and automated responses where applicable.", - "Url": "https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html" - } + "Provider": "aws", + "CheckID": "redshift_cluster_audit_logging", + "CheckTitle": "Check if Redshift cluster has audit logging enabled", + "CheckType": [], + "ServiceName": "redshift", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name", + "Severity": "medium", + "ResourceType": "AwsRedshiftCluster", + "Description": "Check if Redshift cluster has audit logging enabled", + "Risk": "If logs are not enabled; monitoring of service use and threat analysis is not possible.", + "RelatedUrl": "https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Redshift/redshift-cluster-audit-logging-enabled.html", + "NativeIaC": "https://docs.bridgecrew.io/docs/bc_aws_logging_12#cloudformation", + "Other": "https://docs.bridgecrew.io/docs/bc_aws_logging_12#aws-console", + "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_logging_12#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable logs. Create an S3 lifecycle policy. Define use cases, metrics and automated responses where applicable.", + "Url": "https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/redshift/redshift_cluster_audit_logging/redshift_cluster_audit_logging.py b/providers/aws/services/redshift/redshift_cluster_audit_logging/redshift_cluster_audit_logging.py index d41960a4..d99e42a3 100644 --- a/providers/aws/services/redshift/redshift_cluster_audit_logging/redshift_cluster_audit_logging.py +++ b/providers/aws/services/redshift/redshift_cluster_audit_logging/redshift_cluster_audit_logging.py @@ -6,7 +6,7 @@ class redshift_cluster_audit_logging(Check): def execute(self): findings = [] for cluster in redshift_client.clusters: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cluster.region report.resource_id = cluster.id report.resource_arn = cluster.arn diff --git a/providers/aws/services/redshift/redshift_cluster_automated_snapshot/redshift_cluster_automated_snapshot.metadata.json b/providers/aws/services/redshift/redshift_cluster_automated_snapshot/redshift_cluster_automated_snapshot.metadata.json index dc231ec9..19feb217 100644 --- a/providers/aws/services/redshift/redshift_cluster_automated_snapshot/redshift_cluster_automated_snapshot.metadata.json +++ b/providers/aws/services/redshift/redshift_cluster_automated_snapshot/redshift_cluster_automated_snapshot.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "redshift_cluster_automated_snapshot", - "CheckTitle": "Check if Redshift Clusters have automated snapshots enabled", - "CheckType": [], - "ServiceName": "redshift", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name", - "Severity": "medium", - "ResourceType": "AwsRedshiftCluster", - "Description": "Check if Redshift Clusters have automated snapshots enabled", - "Risk": "If backup is not enabled, data is vulnerable. 
Human error or bad actors could erase or modify data.", - "RelatedUrl": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/AWS_Redshift.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Enable automated backup for production data. Define a retention period and periodically test backup restoration. A Disaster Recovery process should be in place to govern Data Protection approach", - "Url": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/AWS_Redshift.html" - } + "Provider": "aws", + "CheckID": "redshift_cluster_automated_snapshot", + "CheckTitle": "Check if Redshift Clusters have automated snapshots enabled", + "CheckType": [], + "ServiceName": "redshift", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name", + "Severity": "medium", + "ResourceType": "AwsRedshiftCluster", + "Description": "Check if Redshift Clusters have automated snapshots enabled", + "Risk": "If backup is not enabled, data is vulnerable. Human error or bad actors could erase or modify data.", + "RelatedUrl": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/AWS_Redshift.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable automated backup for production data. Define a retention period and periodically test backup restoration. A Disaster Recovery process should be in place to govern Data Protection approach", + "Url": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/AWS_Redshift.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/redshift/redshift_cluster_automated_snapshot/redshift_cluster_automated_snapshot.py b/providers/aws/services/redshift/redshift_cluster_automated_snapshot/redshift_cluster_automated_snapshot.py index f6c46fe2..86b0690b 100644 --- a/providers/aws/services/redshift/redshift_cluster_automated_snapshot/redshift_cluster_automated_snapshot.py +++ b/providers/aws/services/redshift/redshift_cluster_automated_snapshot/redshift_cluster_automated_snapshot.py @@ -6,7 +6,7 @@ class redshift_cluster_automated_snapshot(Check): def execute(self): findings = [] for cluster in redshift_client.clusters: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cluster.region report.resource_id = cluster.id report.resource_arn = cluster.arn diff --git a/providers/aws/services/redshift/redshift_cluster_automatic_upgrades/redshift_cluster_automatic_upgrades.metadata.json b/providers/aws/services/redshift/redshift_cluster_automatic_upgrades/redshift_cluster_automatic_upgrades.metadata.json index 4cac2348..b5c6256e 100644 --- a/providers/aws/services/redshift/redshift_cluster_automatic_upgrades/redshift_cluster_automatic_upgrades.metadata.json +++ b/providers/aws/services/redshift/redshift_cluster_automatic_upgrades/redshift_cluster_automatic_upgrades.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "redshift_cluster_automatic_upgrades", - "CheckTitle": "Check for Publicly Accessible Redshift Clusters", - "CheckType": [], - "ServiceName": "redshift", - "SubServiceName": "", - 
"ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name", - "Severity": "high", - "ResourceType": "AwsRedshiftCluster", - "Description": "Check for Publicly Accessible Redshift Clusters", - "Risk": "Without automatic version upgrade enabled; a critical Redshift Cluster version can become severly out of date", - "RelatedUrl": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html", - "Remediation": { - "Code": { - "CLI": "aws redshift modify-cluster --cluster-identifier --allow-version-upgrade", - "NativeIaC": "https://docs.bridgecrew.io/docs/public_9#cloudformation", - "Other": "", - "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-redshift-clusters-allow-version-upgrade-by-default#terraform" - }, - "Recommendation": { - "Text": "Enabled AutomaticVersionUpgrade on Redshift Cluster", - "Url": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html" - } + "Provider": "aws", + "CheckID": "redshift_cluster_automatic_upgrades", + "CheckTitle": "Check for Publicly Accessible Redshift Clusters", + "CheckType": [], + "ServiceName": "redshift", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name", + "Severity": "high", + "ResourceType": "AwsRedshiftCluster", + "Description": "Check for Publicly Accessible Redshift Clusters", + "Risk": "Without automatic version upgrade enabled; a critical Redshift Cluster version can become severly out of date", + "RelatedUrl": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html", + "Remediation": { + "Code": { + "CLI": "aws redshift modify-cluster --cluster-identifier --allow-version-upgrade", + "NativeIaC": "https://docs.bridgecrew.io/docs/public_9#cloudformation", + "Other": "", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-redshift-clusters-allow-version-upgrade-by-default#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enabled AutomaticVersionUpgrade on Redshift Cluster", + "Url": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-cluster-operations.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/redshift/redshift_cluster_automatic_upgrades/redshift_cluster_automatic_upgrades.py b/providers/aws/services/redshift/redshift_cluster_automatic_upgrades/redshift_cluster_automatic_upgrades.py index f89b3101..0cb194b8 100644 --- a/providers/aws/services/redshift/redshift_cluster_automatic_upgrades/redshift_cluster_automatic_upgrades.py +++ b/providers/aws/services/redshift/redshift_cluster_automatic_upgrades/redshift_cluster_automatic_upgrades.py @@ -6,7 +6,7 @@ class redshift_cluster_automatic_upgrades(Check): def execute(self): findings = [] for cluster in redshift_client.clusters: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = cluster.region report.resource_id = cluster.id report.resource_arn = cluster.arn diff --git a/providers/aws/services/redshift/redshift_cluster_public_access/redshift_cluster_public_access.metadata.json b/providers/aws/services/redshift/redshift_cluster_public_access/redshift_cluster_public_access.metadata.json index 171fc7b6..c355a927 100644 --- 
a/providers/aws/services/redshift/redshift_cluster_public_access/redshift_cluster_public_access.metadata.json +++ b/providers/aws/services/redshift/redshift_cluster_public_access/redshift_cluster_public_access.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "redshift_cluster_public_access", - "CheckTitle": "Check for Publicly Accessible Redshift Clusters", - "CheckType": [], - "ServiceName": "redshift", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name", - "Severity": "high", - "ResourceType": "AwsRedshiftCluster", - "Description": "Check for Publicly Accessible Redshift Clusters", - "Risk": "Publicly accessible services could expose sensitive data to bad actors.", - "RelatedUrl": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Redshift/redshift-cluster-publicly-accessible.html", - "NativeIaC": "https://docs.bridgecrew.io/docs/public_9#cloudformation", - "Other": "https://docs.bridgecrew.io/docs/public_9#aws-console", - "Terraform": "https://docs.bridgecrew.io/docs/public_9#terraform" - }, - "Recommendation": { - "Text": "List all shared Redshift clusters and make sure there is a business reason for them.", - "Url": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html" - } + "Provider": "aws", + "CheckID": "redshift_cluster_public_access", + "CheckTitle": "Check for Publicly Accessible Redshift Clusters", + "CheckType": [], + "ServiceName": "redshift", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:redshift:region:account-id:cluster:cluster-name", + "Severity": "high", + "ResourceType": "AwsRedshiftCluster", + "Description": "Check for Publicly Accessible Redshift Clusters", + "Risk": "Publicly accessible services could expose sensitive data to bad actors.", + "RelatedUrl": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/Redshift/redshift-cluster-publicly-accessible.html", + "NativeIaC": "https://docs.bridgecrew.io/docs/public_9#cloudformation", + "Other": "https://docs.bridgecrew.io/docs/public_9#aws-console", + "Terraform": "https://docs.bridgecrew.io/docs/public_9#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "List all shared Redshift clusters and make sure there is a business reason for them.", + "Url": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/redshift/redshift_cluster_public_access/redshift_cluster_public_access.py b/providers/aws/services/redshift/redshift_cluster_public_access/redshift_cluster_public_access.py index 661f2224..1f600e10 100644 --- a/providers/aws/services/redshift/redshift_cluster_public_access/redshift_cluster_public_access.py +++ b/providers/aws/services/redshift/redshift_cluster_public_access/redshift_cluster_public_access.py @@ -6,7 +6,7 @@ class redshift_cluster_public_access(Check): def execute(self): findings = [] for cluster in redshift_client.clusters: - report = Check_Report(self.metadata) + report = 
Check_Report(self.metadata()) report.region = cluster.region report.resource_id = cluster.id report.resource_arn = cluster.arn diff --git a/providers/aws/services/route53/route53_domains_privacy_protection_enabled/route53_domains_privacy_protection_enabled.metadata.json b/providers/aws/services/route53/route53_domains_privacy_protection_enabled/route53_domains_privacy_protection_enabled.metadata.json index d7897b16..f1953cf6 100644 --- a/providers/aws/services/route53/route53_domains_privacy_protection_enabled/route53_domains_privacy_protection_enabled.metadata.json +++ b/providers/aws/services/route53/route53_domains_privacy_protection_enabled/route53_domains_privacy_protection_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/route53/route53_domains_privacy_protection_enabled/route53_domains_privacy_protection_enabled.py b/providers/aws/services/route53/route53_domains_privacy_protection_enabled/route53_domains_privacy_protection_enabled.py index 1c8ed8db..f4f9cc58 100644 --- a/providers/aws/services/route53/route53_domains_privacy_protection_enabled/route53_domains_privacy_protection_enabled.py +++ b/providers/aws/services/route53/route53_domains_privacy_protection_enabled/route53_domains_privacy_protection_enabled.py @@ -7,7 +7,7 @@ class route53_domains_privacy_protection_enabled(Check): findings = [] for domain in route53domains_client.domains.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = domain.name report.region = domain.region diff --git a/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.metadata.json b/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.metadata.json index 997dbab1..9351f18c 100644 --- a/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.metadata.json +++ b/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.py b/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.py index f367e2b1..c38c8c22 100644 --- a/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.py +++ b/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.py @@ -7,7 +7,7 @@ class route53_domains_transferlock_enabled(Check): findings = [] for domain in route53domains_client.domains.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = domain.name report.region = domain.region diff --git a/providers/aws/services/route53/route53_public_hosted_zones_cloudwatch_logging_enabled/route53_public_hosted_zones_cloudwatch_logging_enabled.metadata.json b/providers/aws/services/route53/route53_public_hosted_zones_cloudwatch_logging_enabled/route53_public_hosted_zones_cloudwatch_logging_enabled.metadata.json index 6762b81b..ae33da49 100644 --- 
a/providers/aws/services/route53/route53_public_hosted_zones_cloudwatch_logging_enabled/route53_public_hosted_zones_cloudwatch_logging_enabled.metadata.json +++ b/providers/aws/services/route53/route53_public_hosted_zones_cloudwatch_logging_enabled/route53_public_hosted_zones_cloudwatch_logging_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/route53/route53_public_hosted_zones_cloudwatch_logging_enabled/route53_public_hosted_zones_cloudwatch_logging_enabled.py b/providers/aws/services/route53/route53_public_hosted_zones_cloudwatch_logging_enabled/route53_public_hosted_zones_cloudwatch_logging_enabled.py index eaa2e516..8f75139e 100644 --- a/providers/aws/services/route53/route53_public_hosted_zones_cloudwatch_logging_enabled/route53_public_hosted_zones_cloudwatch_logging_enabled.py +++ b/providers/aws/services/route53/route53_public_hosted_zones_cloudwatch_logging_enabled/route53_public_hosted_zones_cloudwatch_logging_enabled.py @@ -8,7 +8,7 @@ class route53_public_hosted_zones_cloudwatch_logging_enabled(Check): for hosted_zone in route53_client.hosted_zones.values(): if not hosted_zone.private_zone: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.resource_id = hosted_zone.id report.region = hosted_zone.region if ( diff --git a/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.metadata.json b/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.metadata.json index 5efea66d..7f1c6838 100644 --- a/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.metadata.json +++ b/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.metadata.json @@ -32,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.py b/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.py index e92b7a19..7f06b161 100644 --- a/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.py +++ b/providers/aws/services/s3/s3_account_level_public_access_blocks/s3_account_level_public_access_blocks.py @@ -6,7 +6,7 @@ from providers.aws.services.s3.s3control_client import s3control_client class s3_account_level_public_access_blocks(Check): def execute(self): findings = [] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "FAIL" report.status_extended = f"Block Public Access is not configured for the account {s3_client.audited_account}." 
report.region = s3control_client.region diff --git a/providers/aws/services/s3/s3_bucket_acl_prohibited/s3_bucket_acl_prohibited.metadata.json b/providers/aws/services/s3/s3_bucket_acl_prohibited/s3_bucket_acl_prohibited.metadata.json index f3700a9a..5ba7d03a 100644 --- a/providers/aws/services/s3/s3_bucket_acl_prohibited/s3_bucket_acl_prohibited.metadata.json +++ b/providers/aws/services/s3/s3_bucket_acl_prohibited/s3_bucket_acl_prohibited.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "s3_bucket_acl_prohibited", "CheckTitle": "Check if S3 buckets have ACLs enabled", - "CheckType": ["Logging and Monitoring"], + "CheckType": [ + "Logging and Monitoring" + ], "ServiceName": "s3", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/s3/s3_bucket_acl_prohibited/s3_bucket_acl_prohibited.py b/providers/aws/services/s3/s3_bucket_acl_prohibited/s3_bucket_acl_prohibited.py index db0fb02d..1bda0e88 100644 --- a/providers/aws/services/s3/s3_bucket_acl_prohibited/s3_bucket_acl_prohibited.py +++ b/providers/aws/services/s3/s3_bucket_acl_prohibited/s3_bucket_acl_prohibited.py @@ -6,7 +6,7 @@ class s3_bucket_acl_prohibited(Check): def execute(self): findings = [] for bucket in s3_client.buckets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = bucket.region report.resource_id = bucket.name report.status = "FAIL" diff --git a/providers/aws/services/s3/s3_bucket_default_encryption/s3_bucket_default_encryption.metadata.json b/providers/aws/services/s3/s3_bucket_default_encryption/s3_bucket_default_encryption.metadata.json index e2275569..c5cd7397 100644 --- a/providers/aws/services/s3/s3_bucket_default_encryption/s3_bucket_default_encryption.metadata.json +++ b/providers/aws/services/s3/s3_bucket_default_encryption/s3_bucket_default_encryption.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "s3_bucket_default_encryption", "CheckTitle": "Check if S3 buckets have default encryption (SSE) enabled or use a bucket policy to enforce it.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "s3", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/s3/s3_bucket_default_encryption/s3_bucket_default_encryption.py b/providers/aws/services/s3/s3_bucket_default_encryption/s3_bucket_default_encryption.py index f6d40f3a..a7e21ce6 100644 --- a/providers/aws/services/s3/s3_bucket_default_encryption/s3_bucket_default_encryption.py +++ b/providers/aws/services/s3/s3_bucket_default_encryption/s3_bucket_default_encryption.py @@ -6,7 +6,7 @@ class s3_bucket_default_encryption(Check): def execute(self): findings = [] for bucket in s3_client.buckets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = bucket.region report.resource_id = bucket.name if bucket.encryption: diff --git a/providers/aws/services/s3/s3_bucket_no_mfa_delete/s3_bucket_no_mfa_delete.metadata.json b/providers/aws/services/s3/s3_bucket_no_mfa_delete/s3_bucket_no_mfa_delete.metadata.json index 568eba2e..7d036da7 100644 --- a/providers/aws/services/s3/s3_bucket_no_mfa_delete/s3_bucket_no_mfa_delete.metadata.json +++ 
b/providers/aws/services/s3/s3_bucket_no_mfa_delete/s3_bucket_no_mfa_delete.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "s3_bucket_no_mfa_delete", "CheckTitle": "Check if S3 bucket MFA Delete is not enabled.", - "CheckType": ["Logging and Monitoring"], + "CheckType": [ + "Logging and Monitoring" + ], "ServiceName": "s3", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/s3/s3_bucket_no_mfa_delete/s3_bucket_no_mfa_delete.py b/providers/aws/services/s3/s3_bucket_no_mfa_delete/s3_bucket_no_mfa_delete.py index 7904ff06..c32a57b6 100644 --- a/providers/aws/services/s3/s3_bucket_no_mfa_delete/s3_bucket_no_mfa_delete.py +++ b/providers/aws/services/s3/s3_bucket_no_mfa_delete/s3_bucket_no_mfa_delete.py @@ -6,7 +6,7 @@ class s3_bucket_no_mfa_delete(Check): def execute(self): findings = [] for bucket in s3_client.buckets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = bucket.region report.resource_id = bucket.name if bucket.mfa_delete: diff --git a/providers/aws/services/s3/s3_bucket_object_versioning/s3_bucket_object_versioning.metadata.json b/providers/aws/services/s3/s3_bucket_object_versioning/s3_bucket_object_versioning.metadata.json index e8ac4e4c..714a0224 100644 --- a/providers/aws/services/s3/s3_bucket_object_versioning/s3_bucket_object_versioning.metadata.json +++ b/providers/aws/services/s3/s3_bucket_object_versioning/s3_bucket_object_versioning.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "s3_bucket_object_versioning", "CheckTitle": "Check if S3 buckets have object versioning enabled", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "s3", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/s3/s3_bucket_object_versioning/s3_bucket_object_versioning.py b/providers/aws/services/s3/s3_bucket_object_versioning/s3_bucket_object_versioning.py index e38f19cf..0839aadb 100644 --- a/providers/aws/services/s3/s3_bucket_object_versioning/s3_bucket_object_versioning.py +++ b/providers/aws/services/s3/s3_bucket_object_versioning/s3_bucket_object_versioning.py @@ -6,7 +6,7 @@ class s3_bucket_object_versioning(Check): def execute(self): findings = [] for bucket in s3_client.buckets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = bucket.region report.resource_id = bucket.name if bucket.versioning: diff --git a/providers/aws/services/s3/s3_bucket_policy_public_write_access/s3_bucket_policy_public_write_access.metadata.json b/providers/aws/services/s3/s3_bucket_policy_public_write_access/s3_bucket_policy_public_write_access.metadata.json index c9c34ca9..01366b19 100644 --- a/providers/aws/services/s3/s3_bucket_policy_public_write_access/s3_bucket_policy_public_write_access.metadata.json +++ b/providers/aws/services/s3/s3_bucket_policy_public_write_access/s3_bucket_policy_public_write_access.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "s3_bucket_policy_public_write_access", "CheckTitle": "Check if S3 buckets have policies which allow WRITE access.", - "CheckType": ["IAM"], + "CheckType": [ + "IAM" + ], "ServiceName": "s3", "SubServiceName": "", 
"ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/s3/s3_bucket_policy_public_write_access/s3_bucket_policy_public_write_access.py b/providers/aws/services/s3/s3_bucket_policy_public_write_access/s3_bucket_policy_public_write_access.py index 3d0a03b9..4fdaab64 100644 --- a/providers/aws/services/s3/s3_bucket_policy_public_write_access/s3_bucket_policy_public_write_access.py +++ b/providers/aws/services/s3/s3_bucket_policy_public_write_access/s3_bucket_policy_public_write_access.py @@ -6,7 +6,7 @@ class s3_bucket_policy_public_write_access(Check): def execute(self): findings = [] for bucket in s3_client.buckets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = bucket.region report.resource_id = bucket.name # Check if bucket policy allow public write access diff --git a/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.metadata.json b/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.metadata.json index 091485be..c387634f 100644 --- a/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.metadata.json +++ b/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "s3_bucket_public_access", "CheckTitle": "Ensure there are no S3 buckets open to Everyone or Any AWS user.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "s3", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.py b/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.py index d8c3064a..30ea123e 100644 --- a/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.py +++ b/providers/aws/services/s3/s3_bucket_public_access/s3_bucket_public_access.py @@ -12,7 +12,7 @@ class s3_bucket_public_access(Check): and s3control_client.account_public_access_block.ignore_public_acls and s3control_client.account_public_access_block.restrict_public_buckets ): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.status = "PASS" report.status_extended = "All S3 public access blocked at account level." report.region = s3control_client.region @@ -21,7 +21,7 @@ class s3_bucket_public_access(Check): else: # 2. 
If public access is not blocked at account level, check it at each bucket level for bucket in s3_client.buckets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = bucket.region report.resource_id = bucket.name report.status = "PASS" diff --git a/providers/aws/services/s3/s3_bucket_secure_transport_policy/s3_bucket_secure_transport_policy.metadata.json b/providers/aws/services/s3/s3_bucket_secure_transport_policy/s3_bucket_secure_transport_policy.metadata.json index 8b48edd3..87e35ff8 100644 --- a/providers/aws/services/s3/s3_bucket_secure_transport_policy/s3_bucket_secure_transport_policy.metadata.json +++ b/providers/aws/services/s3/s3_bucket_secure_transport_policy/s3_bucket_secure_transport_policy.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "s3_bucket_secure_transport_policy", "CheckTitle": "Check if S3 buckets have secure transport policy.", - "CheckType": ["Data Protection"], + "CheckType": [ + "Data Protection" + ], "ServiceName": "s3", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/s3/s3_bucket_secure_transport_policy/s3_bucket_secure_transport_policy.py b/providers/aws/services/s3/s3_bucket_secure_transport_policy/s3_bucket_secure_transport_policy.py index 8e1ccba5..27b751a8 100644 --- a/providers/aws/services/s3/s3_bucket_secure_transport_policy/s3_bucket_secure_transport_policy.py +++ b/providers/aws/services/s3/s3_bucket_secure_transport_policy/s3_bucket_secure_transport_policy.py @@ -6,7 +6,7 @@ class s3_bucket_secure_transport_policy(Check): def execute(self): findings = [] for bucket in s3_client.buckets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = bucket.region report.resource_id = bucket.name # Check if bucket policy enforces SSL diff --git a/providers/aws/services/s3/s3_bucket_server_access_logging_enabled/s3_bucket_server_access_logging_enabled.metadata.json b/providers/aws/services/s3/s3_bucket_server_access_logging_enabled/s3_bucket_server_access_logging_enabled.metadata.json index 2e57f033..6304555a 100644 --- a/providers/aws/services/s3/s3_bucket_server_access_logging_enabled/s3_bucket_server_access_logging_enabled.metadata.json +++ b/providers/aws/services/s3/s3_bucket_server_access_logging_enabled/s3_bucket_server_access_logging_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "s3_bucket_server_access_logging_enabled", "CheckTitle": "Check if S3 buckets have server access logging enabled", - "CheckType": ["Logging and Monitoring"], + "CheckType": [ + "Logging and Monitoring" + ], "ServiceName": "s3", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/s3/s3_bucket_server_access_logging_enabled/s3_bucket_server_access_logging_enabled.py b/providers/aws/services/s3/s3_bucket_server_access_logging_enabled/s3_bucket_server_access_logging_enabled.py index c77fcab3..0479b4e8 100644 --- a/providers/aws/services/s3/s3_bucket_server_access_logging_enabled/s3_bucket_server_access_logging_enabled.py +++ b/providers/aws/services/s3/s3_bucket_server_access_logging_enabled/s3_bucket_server_access_logging_enabled.py @@ -6,7 +6,7 @@ class 
s3_bucket_server_access_logging_enabled(Check): def execute(self): findings = [] for bucket in s3_client.buckets: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = bucket.region report.resource_id = bucket.name if bucket.logging: diff --git a/providers/aws/services/sagemaker/sagemaker_models_network_isolation_enabled/sagemaker_models_network_isolation_enabled.metadata.json b/providers/aws/services/sagemaker/sagemaker_models_network_isolation_enabled/sagemaker_models_network_isolation_enabled.metadata.json index e3e3a804..d29e61e7 100644 --- a/providers/aws/services/sagemaker/sagemaker_models_network_isolation_enabled/sagemaker_models_network_isolation_enabled.metadata.json +++ b/providers/aws/services/sagemaker/sagemaker_models_network_isolation_enabled/sagemaker_models_network_isolation_enabled.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sagemaker_models_network_isolation_enabled", - "CheckTitle": "Check if Amazon SageMaker Models have network isolation enabled", - "CheckType": [], - "ServiceName": "sagemaker", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:model", - "Severity": "medium", - "ResourceType": "AwsSageMakerModel", - "Description": "Check if Amazon SageMaker Models have network isolation enabled", - "Risk": "This could provide an avenue for unauthorized access to your data.", - "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing.", - "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html" - } + "Provider": "aws", + "CheckID": "sagemaker_models_network_isolation_enabled", + "CheckTitle": "Check if Amazon SageMaker Models have network isolation enabled", + "CheckType": [], + "ServiceName": "sagemaker", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:model", + "Severity": "medium", + "ResourceType": "AwsSageMakerModel", + "Description": "Check if Amazon SageMaker Models have network isolation enabled", + "Risk": "This could provide an avenue for unauthorized access to your data.", + "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing.", + "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sagemaker/sagemaker_models_network_isolation_enabled/sagemaker_models_network_isolation_enabled.py b/providers/aws/services/sagemaker/sagemaker_models_network_isolation_enabled/sagemaker_models_network_isolation_enabled.py index af6b4722..3bbb7fd3 100644 --- 
a/providers/aws/services/sagemaker/sagemaker_models_network_isolation_enabled/sagemaker_models_network_isolation_enabled.py +++ b/providers/aws/services/sagemaker/sagemaker_models_network_isolation_enabled/sagemaker_models_network_isolation_enabled.py @@ -6,7 +6,7 @@ class sagemaker_models_network_isolation_enabled(Check): def execute(self): findings = [] for model in sagemaker_client.sagemaker_models: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = model.region report.resource_id = model.name report.resource_arn = model.arn diff --git a/providers/aws/services/sagemaker/sagemaker_models_vpc_settings_configured/sagemaker_models_vpc_settings_configured.metadata.json b/providers/aws/services/sagemaker/sagemaker_models_vpc_settings_configured/sagemaker_models_vpc_settings_configured.metadata.json index 07bb1085..d2d2296b 100644 --- a/providers/aws/services/sagemaker/sagemaker_models_vpc_settings_configured/sagemaker_models_vpc_settings_configured.metadata.json +++ b/providers/aws/services/sagemaker/sagemaker_models_vpc_settings_configured/sagemaker_models_vpc_settings_configured.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sagemaker_models_vpc_settings_configured", - "CheckTitle": "Check if Amazon SageMaker Models have VPC settings configured", - "CheckType": [], - "ServiceName": "sagemaker", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:model", - "Severity": "medium", - "ResourceType": "AwsSageMakerModel", - "Description": "Check if Amazon SageMaker Models have VPC settings configured", - "Risk": "This could provide an avenue for unauthorized access to your data.", - "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing.", - "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html" - } + "Provider": "aws", + "CheckID": "sagemaker_models_vpc_settings_configured", + "CheckTitle": "Check if Amazon SageMaker Models have VPC settings configured", + "CheckType": [], + "ServiceName": "sagemaker", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:model", + "Severity": "medium", + "ResourceType": "AwsSageMakerModel", + "Description": "Check if Amazon SageMaker Models have VPC settings configured", + "Risk": "This could provide an avenue for unauthorized access to your data.", + "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing.", + "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git 
a/providers/aws/services/sagemaker/sagemaker_models_vpc_settings_configured/sagemaker_models_vpc_settings_configured.py b/providers/aws/services/sagemaker/sagemaker_models_vpc_settings_configured/sagemaker_models_vpc_settings_configured.py index 93beaa37..503618f6 100644 --- a/providers/aws/services/sagemaker/sagemaker_models_vpc_settings_configured/sagemaker_models_vpc_settings_configured.py +++ b/providers/aws/services/sagemaker/sagemaker_models_vpc_settings_configured/sagemaker_models_vpc_settings_configured.py @@ -6,7 +6,7 @@ class sagemaker_models_vpc_settings_configured(Check): def execute(self): findings = [] for model in sagemaker_client.sagemaker_models: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = model.region report.resource_id = model.name report.resource_arn = model.arn diff --git a/providers/aws/services/sagemaker/sagemaker_notebook_instance_encryption_enabled/sagemaker_notebook_instance_encryption_enabled.metadata.json b/providers/aws/services/sagemaker/sagemaker_notebook_instance_encryption_enabled/sagemaker_notebook_instance_encryption_enabled.metadata.json index 3d146192..e3c5e505 100644 --- a/providers/aws/services/sagemaker/sagemaker_notebook_instance_encryption_enabled/sagemaker_notebook_instance_encryption_enabled.metadata.json +++ b/providers/aws/services/sagemaker/sagemaker_notebook_instance_encryption_enabled/sagemaker_notebook_instance_encryption_enabled.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sagemaker_notebook_instance_encryption_enabled", - "CheckTitle": "Check if Amazon SageMaker Notebook instances have data encryption enabled", - "CheckType": [], - "ServiceName": "sagemaker", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:notebook-instance", - "Severity": "medium", - "ResourceType": "AwsSageMakerNotebookInstance", - "Description": "Check if Amazon SageMaker Notebook instances have data encryption enabled", - "Risk": "Data exfiltration could happen if information is not protected. KMS keys provide additional security level to IAM policies.", - "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/key-management.html", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-data-encrypted.html", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-data-encrypted.html", - "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_general_40#fix---buildtime" - }, - "Recommendation": { - "Text": "Specify AWS KMS keys to use for input and output from S3 and EBS.", - "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/key-management.html" - } + "Provider": "aws", + "CheckID": "sagemaker_notebook_instance_encryption_enabled", + "CheckTitle": "Check if Amazon SageMaker Notebook instances have data encryption enabled", + "CheckType": [], + "ServiceName": "sagemaker", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:notebook-instance", + "Severity": "medium", + "ResourceType": "AwsSageMakerNotebookInstance", + "Description": "Check if Amazon SageMaker Notebook instances have data encryption enabled", + "Risk": "Data exfiltration could happen if information is not protected. 
KMS keys provide additional security level to IAM policies.", + "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/key-management.html", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-data-encrypted.html", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-data-encrypted.html", + "Terraform": "https://docs.bridgecrew.io/docs/bc_aws_general_40#fix---buildtime" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Specify AWS KMS keys to use for input and output from S3 and EBS.", + "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/key-management.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sagemaker/sagemaker_notebook_instance_encryption_enabled/sagemaker_notebook_instance_encryption_enabled.py b/providers/aws/services/sagemaker/sagemaker_notebook_instance_encryption_enabled/sagemaker_notebook_instance_encryption_enabled.py index c3c7b63b..1b4ea933 100644 --- a/providers/aws/services/sagemaker/sagemaker_notebook_instance_encryption_enabled/sagemaker_notebook_instance_encryption_enabled.py +++ b/providers/aws/services/sagemaker/sagemaker_notebook_instance_encryption_enabled/sagemaker_notebook_instance_encryption_enabled.py @@ -6,7 +6,7 @@ class sagemaker_notebook_instance_encryption_enabled(Check): def execute(self): findings = [] for notebook_instance in sagemaker_client.sagemaker_notebook_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = notebook_instance.region report.resource_id = notebook_instance.name report.resource_arn = notebook_instance.arn diff --git a/providers/aws/services/sagemaker/sagemaker_notebook_instance_root_access_disabled/sagemaker_notebook_instance_root_access_disabled.metadata.json b/providers/aws/services/sagemaker/sagemaker_notebook_instance_root_access_disabled/sagemaker_notebook_instance_root_access_disabled.metadata.json index 726bf706..9c06ca43 100644 --- a/providers/aws/services/sagemaker/sagemaker_notebook_instance_root_access_disabled/sagemaker_notebook_instance_root_access_disabled.metadata.json +++ b/providers/aws/services/sagemaker/sagemaker_notebook_instance_root_access_disabled/sagemaker_notebook_instance_root_access_disabled.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sagemaker_notebook_instance_root_access_disabled", - "CheckTitle": "Check if Amazon SageMaker Notebook instances have root access disabled", - "CheckType": [], - "ServiceName": "sagemaker", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:notebook-instance", - "Severity": "medium", - "ResourceType": "AwsSageMakerNotebookInstance", - "Description": "Check if Amazon SageMaker Notebook instances have root access disabled", - "Risk": "Users with root access have administrator privileges; users can access and edit all files on a notebook instance with root access enabled", - "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-root-access.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Set the RootAccess field to Disabled. 
You can also disable root access for users when you create or update a notebook instance in the Amazon SageMaker console.", - "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-root-access.html" - } + "Provider": "aws", + "CheckID": "sagemaker_notebook_instance_root_access_disabled", + "CheckTitle": "Check if Amazon SageMaker Notebook instances have root access disabled", + "CheckType": [], + "ServiceName": "sagemaker", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:notebook-instance", + "Severity": "medium", + "ResourceType": "AwsSageMakerNotebookInstance", + "Description": "Check if Amazon SageMaker Notebook instances have root access disabled", + "Risk": "Users with root access have administrator privileges; users can access and edit all files on a notebook instance with root access enabled", + "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-root-access.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Set the RootAccess field to Disabled. You can also disable root access for users when you create or update a notebook instance in the Amazon SageMaker console.", + "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-root-access.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sagemaker/sagemaker_notebook_instance_root_access_disabled/sagemaker_notebook_instance_root_access_disabled.py b/providers/aws/services/sagemaker/sagemaker_notebook_instance_root_access_disabled/sagemaker_notebook_instance_root_access_disabled.py index a41d4c6c..d61e6664 100644 --- a/providers/aws/services/sagemaker/sagemaker_notebook_instance_root_access_disabled/sagemaker_notebook_instance_root_access_disabled.py +++ b/providers/aws/services/sagemaker/sagemaker_notebook_instance_root_access_disabled/sagemaker_notebook_instance_root_access_disabled.py @@ -6,7 +6,7 @@ class sagemaker_notebook_instance_root_access_disabled(Check): def execute(self): findings = [] for notebook_instance in sagemaker_client.sagemaker_notebook_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = notebook_instance.region report.resource_id = notebook_instance.name report.resource_arn = notebook_instance.arn diff --git a/providers/aws/services/sagemaker/sagemaker_notebook_instance_vpc_settings_configured/sagemaker_notebook_instance_vpc_settings_configured.metadata.json b/providers/aws/services/sagemaker/sagemaker_notebook_instance_vpc_settings_configured/sagemaker_notebook_instance_vpc_settings_configured.metadata.json index 0e20b6f7..c4fddd3f 100644 --- a/providers/aws/services/sagemaker/sagemaker_notebook_instance_vpc_settings_configured/sagemaker_notebook_instance_vpc_settings_configured.metadata.json +++ b/providers/aws/services/sagemaker/sagemaker_notebook_instance_vpc_settings_configured/sagemaker_notebook_instance_vpc_settings_configured.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sagemaker_notebook_instance_vpc_settings_configured", - "CheckTitle": "Check if Amazon SageMaker Notebook instances have VPC settings configured", - "CheckType": [], - "ServiceName": "sagemaker", - "SubServiceName": "", - 
"ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:notebook-instance", - "Severity": "medium", - "ResourceType": "AwsSageMakerNotebookInstance", - "Description": "Check if Amazon SageMaker Notebook instances have VPC settings configured", - "Risk": "This could provide an avenue for unauthorized access to your data.", - "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-instance-in-vpc.html", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-instance-in-vpc.html", - "Terraform": "" - }, - "Recommendation": { - "Text": "Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing..", - "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html" - } + "Provider": "aws", + "CheckID": "sagemaker_notebook_instance_vpc_settings_configured", + "CheckTitle": "Check if Amazon SageMaker Notebook instances have VPC settings configured", + "CheckType": [], + "ServiceName": "sagemaker", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:notebook-instance", + "Severity": "medium", + "ResourceType": "AwsSageMakerNotebookInstance", + "Description": "Check if Amazon SageMaker Notebook instances have VPC settings configured", + "Risk": "This could provide an avenue for unauthorized access to your data.", + "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-instance-in-vpc.html", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-instance-in-vpc.html", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing..", + "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sagemaker/sagemaker_notebook_instance_vpc_settings_configured/sagemaker_notebook_instance_vpc_settings_configured.py b/providers/aws/services/sagemaker/sagemaker_notebook_instance_vpc_settings_configured/sagemaker_notebook_instance_vpc_settings_configured.py index ad33584e..51d0fe10 100644 --- a/providers/aws/services/sagemaker/sagemaker_notebook_instance_vpc_settings_configured/sagemaker_notebook_instance_vpc_settings_configured.py +++ b/providers/aws/services/sagemaker/sagemaker_notebook_instance_vpc_settings_configured/sagemaker_notebook_instance_vpc_settings_configured.py @@ -6,7 +6,7 @@ class sagemaker_notebook_instance_vpc_settings_configured(Check): def execute(self): findings = [] for notebook_instance in sagemaker_client.sagemaker_notebook_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = notebook_instance.region report.resource_id = notebook_instance.name report.resource_arn = notebook_instance.arn diff 
--git a/providers/aws/services/sagemaker/sagemaker_notebook_instance_without_direct_internet_access_configured/sagemaker_notebook_instance_without_direct_internet_access_configured.metadata.json b/providers/aws/services/sagemaker/sagemaker_notebook_instance_without_direct_internet_access_configured/sagemaker_notebook_instance_without_direct_internet_access_configured.metadata.json index 6d7d4ec0..f62f6516 100644 --- a/providers/aws/services/sagemaker/sagemaker_notebook_instance_without_direct_internet_access_configured/sagemaker_notebook_instance_without_direct_internet_access_configured.metadata.json +++ b/providers/aws/services/sagemaker/sagemaker_notebook_instance_without_direct_internet_access_configured/sagemaker_notebook_instance_without_direct_internet_access_configured.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sagemaker_notebook_instance_without_direct_internet_access_configured", - "CheckTitle": "Check if Amazon SageMaker Notebook instances have direct internet access", - "CheckType": [], - "ServiceName": "sagemaker", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:notebook-instance", - "Severity": "medium", - "ResourceType": "AwsSageMakerNotebookInstance", - "Description": "Check if Amazon SageMaker Notebook instances have direct internet access", - "Risk": "This could provide an avenue for unauthorized access to your data.", - "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-direct-internet-access.html", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-direct-internet-access.html", - "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-direct-internet-access-is-disabled-for-an-amazon-sagemaker-notebook-instance#fix---buildtime" - }, - "Recommendation": { - "Text": "Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing.", - "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html" - } + "Provider": "aws", + "CheckID": "sagemaker_notebook_instance_without_direct_internet_access_configured", + "CheckTitle": "Check if Amazon SageMaker Notebook instances have direct internet access", + "CheckType": [], + "ServiceName": "sagemaker", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:notebook-instance", + "Severity": "medium", + "ResourceType": "AwsSageMakerNotebookInstance", + "Description": "Check if Amazon SageMaker Notebook instances have direct internet access", + "Risk": "This could provide an avenue for unauthorized access to your data.", + "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-direct-internet-access.html", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SageMaker/notebook-direct-internet-access.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-direct-internet-access-is-disabled-for-an-amazon-sagemaker-notebook-instance#fix---buildtime" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": 
"Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing.", + "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sagemaker/sagemaker_notebook_instance_without_direct_internet_access_configured/sagemaker_notebook_instance_without_direct_internet_access_configured.py b/providers/aws/services/sagemaker/sagemaker_notebook_instance_without_direct_internet_access_configured/sagemaker_notebook_instance_without_direct_internet_access_configured.py index 880e40be..82d078f8 100644 --- a/providers/aws/services/sagemaker/sagemaker_notebook_instance_without_direct_internet_access_configured/sagemaker_notebook_instance_without_direct_internet_access_configured.py +++ b/providers/aws/services/sagemaker/sagemaker_notebook_instance_without_direct_internet_access_configured/sagemaker_notebook_instance_without_direct_internet_access_configured.py @@ -6,7 +6,7 @@ class sagemaker_notebook_instance_without_direct_internet_access_configured(Chec def execute(self): findings = [] for notebook_instance in sagemaker_client.sagemaker_notebook_instances: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = notebook_instance.region report.resource_id = notebook_instance.name report.resource_arn = notebook_instance.arn diff --git a/providers/aws/services/sagemaker/sagemaker_training_jobs_intercontainer_encryption_enabled/sagemaker_training_jobs_intercontainer_encryption_enabled.metadata.json b/providers/aws/services/sagemaker/sagemaker_training_jobs_intercontainer_encryption_enabled/sagemaker_training_jobs_intercontainer_encryption_enabled.metadata.json index 3d25efdf..a4f330ef 100644 --- a/providers/aws/services/sagemaker/sagemaker_training_jobs_intercontainer_encryption_enabled/sagemaker_training_jobs_intercontainer_encryption_enabled.metadata.json +++ b/providers/aws/services/sagemaker/sagemaker_training_jobs_intercontainer_encryption_enabled/sagemaker_training_jobs_intercontainer_encryption_enabled.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sagemaker_training_jobs_intercontainer_encryption_enabled", - "CheckTitle": "Check if Amazon SageMaker Training jobs have intercontainer encryption enabled", - "CheckType": [], - "ServiceName": "sagemaker", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:training-job", - "Severity": "medium", - "ResourceType": "AwsSageMakerTrainingJob", - "Description": "Check if Amazon SageMaker Training jobs have intercontainer encryption enabled", - "Risk": "If not restricted unintended access could happen.", - "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Internetwork communications support TLS 1.2 encryption between all components and clients.", - "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html" - } + "Provider": "aws", + "CheckID": "sagemaker_training_jobs_intercontainer_encryption_enabled", + "CheckTitle": "Check if Amazon SageMaker Training jobs have intercontainer encryption enabled", + "CheckType": [], + "ServiceName": "sagemaker", + "SubServiceName": "", + "ResourceIdTemplate": 
"arn:aws:sagemaker:region:account-id:training-job", + "Severity": "medium", + "ResourceType": "AwsSageMakerTrainingJob", + "Description": "Check if Amazon SageMaker Training jobs have intercontainer encryption enabled", + "Risk": "If not restricted unintended access could happen.", + "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Internetwork communications support TLS 1.2 encryption between all components and clients.", + "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sagemaker/sagemaker_training_jobs_intercontainer_encryption_enabled/sagemaker_training_jobs_intercontainer_encryption_enabled.py b/providers/aws/services/sagemaker/sagemaker_training_jobs_intercontainer_encryption_enabled/sagemaker_training_jobs_intercontainer_encryption_enabled.py index caeb4d2a..19bb4aac 100644 --- a/providers/aws/services/sagemaker/sagemaker_training_jobs_intercontainer_encryption_enabled/sagemaker_training_jobs_intercontainer_encryption_enabled.py +++ b/providers/aws/services/sagemaker/sagemaker_training_jobs_intercontainer_encryption_enabled/sagemaker_training_jobs_intercontainer_encryption_enabled.py @@ -6,7 +6,7 @@ class sagemaker_training_jobs_intercontainer_encryption_enabled(Check): def execute(self): findings = [] for training_job in sagemaker_client.sagemaker_training_jobs: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = training_job.region report.resource_id = training_job.name report.resource_arn = training_job.arn diff --git a/providers/aws/services/sagemaker/sagemaker_training_jobs_network_isolation_enabled/sagemaker_training_jobs_network_isolation_enabled.metadata.json b/providers/aws/services/sagemaker/sagemaker_training_jobs_network_isolation_enabled/sagemaker_training_jobs_network_isolation_enabled.metadata.json index 2f011ed5..f4c5acda 100644 --- a/providers/aws/services/sagemaker/sagemaker_training_jobs_network_isolation_enabled/sagemaker_training_jobs_network_isolation_enabled.metadata.json +++ b/providers/aws/services/sagemaker/sagemaker_training_jobs_network_isolation_enabled/sagemaker_training_jobs_network_isolation_enabled.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sagemaker_training_jobs_network_isolation_enabled", - "CheckTitle": "Check if Amazon SageMaker Training jobs have network isolation enabled", - "CheckType": [], - "ServiceName": "sagemaker", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:training-job", - "Severity": "medium", - "ResourceType": "AwsSageMakerTrainingJob", - "Description": "Check if Amazon SageMaker Training jobs have network isolation enabled", - "Risk": "This could provide an avenue for unauthorized access to your data.", - "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Restrict which traffic can access by launching Studio in a 
Virtual Private Cloud (VPC) of your choosing.", - "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html" - } + "Provider": "aws", + "CheckID": "sagemaker_training_jobs_network_isolation_enabled", + "CheckTitle": "Check if Amazon SageMaker Training jobs have network isolation enabled", + "CheckType": [], + "ServiceName": "sagemaker", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:training-job", + "Severity": "medium", + "ResourceType": "AwsSageMakerTrainingJob", + "Description": "Check if Amazon SageMaker Training jobs have network isolation enabled", + "Risk": "This could provide an avenue for unauthorized access to your data.", + "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing.", + "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sagemaker/sagemaker_training_jobs_network_isolation_enabled/sagemaker_training_jobs_network_isolation_enabled.py b/providers/aws/services/sagemaker/sagemaker_training_jobs_network_isolation_enabled/sagemaker_training_jobs_network_isolation_enabled.py index c26ea22b..a48c543e 100644 --- a/providers/aws/services/sagemaker/sagemaker_training_jobs_network_isolation_enabled/sagemaker_training_jobs_network_isolation_enabled.py +++ b/providers/aws/services/sagemaker/sagemaker_training_jobs_network_isolation_enabled/sagemaker_training_jobs_network_isolation_enabled.py @@ -6,7 +6,7 @@ class sagemaker_training_jobs_network_isolation_enabled(Check): def execute(self): findings = [] for training_job in sagemaker_client.sagemaker_training_jobs: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = training_job.region report.resource_id = training_job.name report.resource_arn = training_job.arn diff --git a/providers/aws/services/sagemaker/sagemaker_training_jobs_volume_and_output_encryption_enabled/sagemaker_training_jobs_volume_and_output_encryption_enabled.metadata.json b/providers/aws/services/sagemaker/sagemaker_training_jobs_volume_and_output_encryption_enabled/sagemaker_training_jobs_volume_and_output_encryption_enabled.metadata.json index bd62fb55..59584128 100644 --- a/providers/aws/services/sagemaker/sagemaker_training_jobs_volume_and_output_encryption_enabled/sagemaker_training_jobs_volume_and_output_encryption_enabled.metadata.json +++ b/providers/aws/services/sagemaker/sagemaker_training_jobs_volume_and_output_encryption_enabled/sagemaker_training_jobs_volume_and_output_encryption_enabled.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sagemaker_training_jobs_volume_and_output_encryption_enabled", - "CheckTitle": "Check if Amazon SageMaker Training jobs have volume and output with KMS encryption enabled", - "CheckType": [], - "ServiceName": "sagemaker", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:training-job", - "Severity": "medium", - "ResourceType": 
"AwsSageMakerTrainingJob", - "Description": "Check if Amazon SageMaker Training jobs have volume and output with KMS encryption enabled", - "Risk": "Data exfiltration could happen if information is not protected. KMS keys provide additional security level to IAM policies.", - "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/key-management.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Specify AWS KMS keys to use for input and output from S3 and EBS.", - "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/key-management.html" - } + "Provider": "aws", + "CheckID": "sagemaker_training_jobs_volume_and_output_encryption_enabled", + "CheckTitle": "Check if Amazon SageMaker Training jobs have volume and output with KMS encryption enabled", + "CheckType": [], + "ServiceName": "sagemaker", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:training-job", + "Severity": "medium", + "ResourceType": "AwsSageMakerTrainingJob", + "Description": "Check if Amazon SageMaker Training jobs have volume and output with KMS encryption enabled", + "Risk": "Data exfiltration could happen if information is not protected. KMS keys provide additional security level to IAM policies.", + "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/key-management.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Specify AWS KMS keys to use for input and output from S3 and EBS.", + "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/key-management.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sagemaker/sagemaker_training_jobs_volume_and_output_encryption_enabled/sagemaker_training_jobs_volume_and_output_encryption_enabled.py b/providers/aws/services/sagemaker/sagemaker_training_jobs_volume_and_output_encryption_enabled/sagemaker_training_jobs_volume_and_output_encryption_enabled.py index d6ab7e9e..018f435d 100644 --- a/providers/aws/services/sagemaker/sagemaker_training_jobs_volume_and_output_encryption_enabled/sagemaker_training_jobs_volume_and_output_encryption_enabled.py +++ b/providers/aws/services/sagemaker/sagemaker_training_jobs_volume_and_output_encryption_enabled/sagemaker_training_jobs_volume_and_output_encryption_enabled.py @@ -6,7 +6,7 @@ class sagemaker_training_jobs_volume_and_output_encryption_enabled(Check): def execute(self): findings = [] for training_job in sagemaker_client.sagemaker_training_jobs: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = training_job.region report.resource_id = training_job.name report.resource_arn = training_job.arn diff --git a/providers/aws/services/sagemaker/sagemaker_training_jobs_vpc_settings_configured/sagemaker_training_jobs_vpc_settings_configured.metadata.json b/providers/aws/services/sagemaker/sagemaker_training_jobs_vpc_settings_configured/sagemaker_training_jobs_vpc_settings_configured.metadata.json index f3928956..0e9078b2 100644 --- a/providers/aws/services/sagemaker/sagemaker_training_jobs_vpc_settings_configured/sagemaker_training_jobs_vpc_settings_configured.metadata.json +++ 
b/providers/aws/services/sagemaker/sagemaker_training_jobs_vpc_settings_configured/sagemaker_training_jobs_vpc_settings_configured.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sagemaker_training_jobs_vpc_settings_configured", - "CheckTitle": "Check if Amazon SageMaker Training job have VPC settings configured.", - "CheckType": [], - "ServiceName": "sagemaker", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:training-job", - "Severity": "medium", - "ResourceType": "AwsSageMakerTrainingJob", - "Description": "Check if Amazon SageMaker Training job have VPC settings configured.", - "Risk": "This could provide an avenue for unauthorized access to your data.", - "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "", - "Terraform": "" - }, - "Recommendation": { - "Text": "Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing.", - "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html" - } + "Provider": "aws", + "CheckID": "sagemaker_training_jobs_vpc_settings_configured", + "CheckTitle": "Check if Amazon SageMaker Training job have VPC settings configured.", + "CheckType": [], + "ServiceName": "sagemaker", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sagemaker:region:account-id:training-job", + "Severity": "medium", + "ResourceType": "AwsSageMakerTrainingJob", + "Description": "Check if Amazon SageMaker Training job have VPC settings configured.", + "Risk": "This could provide an avenue for unauthorized access to your data.", + "RelatedUrl": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Restrict which traffic can access by launching Studio in a Virtual Private Cloud (VPC) of your choosing.", + "Url": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sagemaker/sagemaker_training_jobs_vpc_settings_configured/sagemaker_training_jobs_vpc_settings_configured.py b/providers/aws/services/sagemaker/sagemaker_training_jobs_vpc_settings_configured/sagemaker_training_jobs_vpc_settings_configured.py index 6cf85182..7fc5b46d 100644 --- a/providers/aws/services/sagemaker/sagemaker_training_jobs_vpc_settings_configured/sagemaker_training_jobs_vpc_settings_configured.py +++ b/providers/aws/services/sagemaker/sagemaker_training_jobs_vpc_settings_configured/sagemaker_training_jobs_vpc_settings_configured.py @@ -6,7 +6,7 @@ class sagemaker_training_jobs_vpc_settings_configured(Check): def execute(self): findings = [] for training_job in sagemaker_client.sagemaker_training_jobs: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = training_job.region report.resource_id = training_job.name report.resource_arn = training_job.arn diff --git a/providers/aws/services/secretsmanager/secretsmanager_automatic_rotation_enabled/secretsmanager_automatic_rotation_enabled.metadata.json 
b/providers/aws/services/secretsmanager/secretsmanager_automatic_rotation_enabled/secretsmanager_automatic_rotation_enabled.metadata.json index 190b7333..3a7e2f5b 100644 --- a/providers/aws/services/secretsmanager/secretsmanager_automatic_rotation_enabled/secretsmanager_automatic_rotation_enabled.metadata.json +++ b/providers/aws/services/secretsmanager/secretsmanager_automatic_rotation_enabled/secretsmanager_automatic_rotation_enabled.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "Infrastructure Protection", - "Compliance": [] -} \ No newline at end of file + "Notes": "Infrastructure Protection" +} diff --git a/providers/aws/services/secretsmanager/secretsmanager_automatic_rotation_enabled/secretsmanager_automatic_rotation_enabled.py b/providers/aws/services/secretsmanager/secretsmanager_automatic_rotation_enabled/secretsmanager_automatic_rotation_enabled.py index 941321a3..b11f1ee7 100644 --- a/providers/aws/services/secretsmanager/secretsmanager_automatic_rotation_enabled/secretsmanager_automatic_rotation_enabled.py +++ b/providers/aws/services/secretsmanager/secretsmanager_automatic_rotation_enabled/secretsmanager_automatic_rotation_enabled.py @@ -8,7 +8,7 @@ class secretsmanager_automatic_rotation_enabled(Check): def execute(self): findings = [] for secret in secretsmanager_client.secrets.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = secret.region report.resource_id = secret.name report.resource_arn = secret.arn diff --git a/providers/aws/services/securityhub/securityhub_enabled/securityhub_enabled.metadata.json b/providers/aws/services/securityhub/securityhub_enabled/securityhub_enabled.metadata.json index 19e87a61..f14d83fb 100644 --- a/providers/aws/services/securityhub/securityhub_enabled/securityhub_enabled.metadata.json +++ b/providers/aws/services/securityhub/securityhub_enabled/securityhub_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "securityhub_enabled", "CheckTitle": "Check if Security Hub is enabled and its standard subscriptions.", - "CheckType": ["Logging and Monitoring"], + "CheckType": [ + "Logging and Monitoring" + ], "ServiceName": "securityhub", "SubServiceName": "", "ResourceIdTemplate": "arn:partition:securityhub:region:account-id:hub/hub-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/securityhub/securityhub_enabled/securityhub_enabled.py b/providers/aws/services/securityhub/securityhub_enabled/securityhub_enabled.py index b922a188..536f914a 100644 --- a/providers/aws/services/securityhub/securityhub_enabled/securityhub_enabled.py +++ b/providers/aws/services/securityhub/securityhub_enabled/securityhub_enabled.py @@ -6,7 +6,7 @@ class securityhub_enabled(Check): def execute(self): findings = [] for securityhub in securityhub_client.securityhubs: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = securityhub.region if securityhub.status == "ACTIVE": report.status = "PASS" diff --git a/providers/aws/services/shield/shield_advanced_protection_in_associated_elastic_ips/shield_advanced_protection_in_associated_elastic_ips.metadata.json b/providers/aws/services/shield/shield_advanced_protection_in_associated_elastic_ips/shield_advanced_protection_in_associated_elastic_ips.metadata.json index 88f079f7..c3bf36b0 100644 --- 
a/providers/aws/services/shield/shield_advanced_protection_in_associated_elastic_ips/shield_advanced_protection_in_associated_elastic_ips.metadata.json +++ b/providers/aws/services/shield/shield_advanced_protection_in_associated_elastic_ips/shield_advanced_protection_in_associated_elastic_ips.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/shield/shield_advanced_protection_in_associated_elastic_ips/shield_advanced_protection_in_associated_elastic_ips.py b/providers/aws/services/shield/shield_advanced_protection_in_associated_elastic_ips/shield_advanced_protection_in_associated_elastic_ips.py index 06049628..47f647e8 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_associated_elastic_ips/shield_advanced_protection_in_associated_elastic_ips.py +++ b/providers/aws/services/shield/shield_advanced_protection_in_associated_elastic_ips/shield_advanced_protection_in_associated_elastic_ips.py @@ -1,6 +1,6 @@ from lib.check.models import Check, Check_Report -from providers.aws.services.shield.shield_client import shield_client from providers.aws.services.ec2.ec2_client import ec2_client +from providers.aws.services.shield.shield_client import shield_client class shield_advanced_protection_in_associated_elastic_ips(Check): @@ -8,7 +8,7 @@ class shield_advanced_protection_in_associated_elastic_ips(Check): findings = [] if shield_client.enabled: for elastic_ip in ec2_client.elastic_ips: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = shield_client.region report.resource_id = elastic_ip.allocation_id report.resource_arn = elastic_ip.arn diff --git a/providers/aws/services/shield/shield_advanced_protection_in_classic_load_balancers/shield_advanced_protection_in_classic_load_balancers.metadata.json b/providers/aws/services/shield/shield_advanced_protection_in_classic_load_balancers/shield_advanced_protection_in_classic_load_balancers.metadata.json index 6513d48a..db77cfef 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_classic_load_balancers/shield_advanced_protection_in_classic_load_balancers.metadata.json +++ b/providers/aws/services/shield/shield_advanced_protection_in_classic_load_balancers/shield_advanced_protection_in_classic_load_balancers.metadata.json @@ -1,6 +1,6 @@ { "Provider": "aws", - "CheckID": "shield_advanced_protection_in_associated_elastic_ips", + "CheckID": "shield_advanced_protection_in_classic_load_balancers", "CheckTitle": "Check if Classic Load Balancers are protected by AWS Shield Advanced.", "CheckType": [], "ServiceName": "shield", @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/shield/shield_advanced_protection_in_classic_load_balancers/shield_advanced_protection_in_classic_load_balancers.py b/providers/aws/services/shield/shield_advanced_protection_in_classic_load_balancers/shield_advanced_protection_in_classic_load_balancers.py index e69b3b25..0c1dc8c5 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_classic_load_balancers/shield_advanced_protection_in_classic_load_balancers.py +++ b/providers/aws/services/shield/shield_advanced_protection_in_classic_load_balancers/shield_advanced_protection_in_classic_load_balancers.py @@ -1,6 +1,6 @@ from lib.check.models import Check, Check_Report -from 
providers.aws.services.shield.shield_client import shield_client from providers.aws.services.elb.elb_client import elb_client +from providers.aws.services.shield.shield_client import shield_client class shield_advanced_protection_in_classic_load_balancers(Check): @@ -8,7 +8,7 @@ class shield_advanced_protection_in_classic_load_balancers(Check): findings = [] if shield_client.enabled: for elb in elb_client.loadbalancers: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = shield_client.region report.resource_id = elb.name report.resource_arn = elb.arn diff --git a/providers/aws/services/shield/shield_advanced_protection_in_cloudfront_distributions/shield_advanced_protection_in_cloudfront_distributions.metadata.json b/providers/aws/services/shield/shield_advanced_protection_in_cloudfront_distributions/shield_advanced_protection_in_cloudfront_distributions.metadata.json index 3638930f..c09dcf84 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_cloudfront_distributions/shield_advanced_protection_in_cloudfront_distributions.metadata.json +++ b/providers/aws/services/shield/shield_advanced_protection_in_cloudfront_distributions/shield_advanced_protection_in_cloudfront_distributions.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/shield/shield_advanced_protection_in_cloudfront_distributions/shield_advanced_protection_in_cloudfront_distributions.py b/providers/aws/services/shield/shield_advanced_protection_in_cloudfront_distributions/shield_advanced_protection_in_cloudfront_distributions.py index 7e3bf5af..b99c65b7 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_cloudfront_distributions/shield_advanced_protection_in_cloudfront_distributions.py +++ b/providers/aws/services/shield/shield_advanced_protection_in_cloudfront_distributions/shield_advanced_protection_in_cloudfront_distributions.py @@ -1,6 +1,6 @@ from lib.check.models import Check, Check_Report -from providers.aws.services.shield.shield_client import shield_client from providers.aws.services.cloudfront.cloudfront_client import cloudfront_client +from providers.aws.services.shield.shield_client import shield_client class shield_advanced_protection_in_cloudfront_distributions(Check): @@ -8,7 +8,7 @@ class shield_advanced_protection_in_cloudfront_distributions(Check): findings = [] if shield_client.enabled: for distribution in cloudfront_client.distributions.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = shield_client.region report.resource_id = distribution.id report.resource_arn = distribution.arn diff --git a/providers/aws/services/shield/shield_advanced_protection_in_global_accelerators/shield_advanced_protection_in_global_accelerators.metadata.json b/providers/aws/services/shield/shield_advanced_protection_in_global_accelerators/shield_advanced_protection_in_global_accelerators.metadata.json index 6ed9608a..13ae15c0 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_global_accelerators/shield_advanced_protection_in_global_accelerators.metadata.json +++ b/providers/aws/services/shield/shield_advanced_protection_in_global_accelerators/shield_advanced_protection_in_global_accelerators.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} 
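The change repeated in every .py hunk of this patch is uniform: each check now builds its findings from Check_Report(self.metadata()) instead of Check_Report(self.metadata), so the check metadata is obtained through a method call on the Check base class rather than read as an attribute. As a point of reference, here is a minimal sketch of a check after the patch, condensed from the shield_advanced_protection_in_associated_elastic_ips hunk above; the class name and the hard-coded FAIL status are placeholders, and the Check and Check_Report internals (defined in lib/check/models.py, not part of this section) are assumed to behave as these call sites imply.

from lib.check.models import Check, Check_Report
from providers.aws.services.ec2.ec2_client import ec2_client
from providers.aws.services.shield.shield_client import shield_client


class shield_advanced_protection_sketch(Check):
    def execute(self):
        findings = []
        if shield_client.enabled:
            for elastic_ip in ec2_client.elastic_ips:
                # Metadata is now obtained via a call, not an attribute lookup.
                report = Check_Report(self.metadata())
                report.region = shield_client.region
                report.resource_id = elastic_ip.allocation_id
                report.resource_arn = elastic_ip.arn
                report.status = "FAIL"  # placeholder; the real check derives PASS/FAIL from Shield protections
                findings.append(report)
        return findings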
diff --git a/providers/aws/services/shield/shield_advanced_protection_in_global_accelerators/shield_advanced_protection_in_global_accelerators.py b/providers/aws/services/shield/shield_advanced_protection_in_global_accelerators/shield_advanced_protection_in_global_accelerators.py index c602904e..2303a544 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_global_accelerators/shield_advanced_protection_in_global_accelerators.py +++ b/providers/aws/services/shield/shield_advanced_protection_in_global_accelerators/shield_advanced_protection_in_global_accelerators.py @@ -1,8 +1,8 @@ from lib.check.models import Check, Check_Report -from providers.aws.services.shield.shield_client import shield_client from providers.aws.services.globalaccelerator.globalaccelerator_client import ( globalaccelerator_client, ) +from providers.aws.services.shield.shield_client import shield_client class shield_advanced_protection_in_global_accelerators(Check): @@ -10,7 +10,7 @@ class shield_advanced_protection_in_global_accelerators(Check): findings = [] if shield_client.enabled: for accelerator in globalaccelerator_client.accelerators.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = shield_client.region report.resource_id = accelerator.name report.resource_arn = accelerator.arn diff --git a/providers/aws/services/shield/shield_advanced_protection_in_internet_facing_load_balancers/shield_advanced_protection_in_internet_facing_load_balancers.metadata.json b/providers/aws/services/shield/shield_advanced_protection_in_internet_facing_load_balancers/shield_advanced_protection_in_internet_facing_load_balancers.metadata.json index 0189ca74..83226ac2 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_internet_facing_load_balancers/shield_advanced_protection_in_internet_facing_load_balancers.metadata.json +++ b/providers/aws/services/shield/shield_advanced_protection_in_internet_facing_load_balancers/shield_advanced_protection_in_internet_facing_load_balancers.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/shield/shield_advanced_protection_in_internet_facing_load_balancers/shield_advanced_protection_in_internet_facing_load_balancers.py b/providers/aws/services/shield/shield_advanced_protection_in_internet_facing_load_balancers/shield_advanced_protection_in_internet_facing_load_balancers.py index 6c8fa0ee..4e4ad71f 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_internet_facing_load_balancers/shield_advanced_protection_in_internet_facing_load_balancers.py +++ b/providers/aws/services/shield/shield_advanced_protection_in_internet_facing_load_balancers/shield_advanced_protection_in_internet_facing_load_balancers.py @@ -1,8 +1,6 @@ from lib.check.models import Check, Check_Report +from providers.aws.services.elbv2.elbv2_client import elbv2_client from providers.aws.services.shield.shield_client import shield_client -from providers.aws.services.elbv2.elbv2_client import ( - elbv2_client, -) class shield_advanced_protection_in_internet_facing_load_balancers(Check): @@ -11,7 +9,7 @@ class shield_advanced_protection_in_internet_facing_load_balancers(Check): if shield_client.enabled: for elbv2 in elbv2_client.loadbalancersv2: if elbv2.type == "application" and elbv2.scheme == "internet-facing": - report = Check_Report(self.metadata) + report = 
Check_Report(self.metadata()) report.region = shield_client.region report.resource_id = elbv2.name report.resource_arn = elbv2.arn diff --git a/providers/aws/services/shield/shield_advanced_protection_in_route53_hosted_zones/shield_advanced_protection_in_route53_hosted_zones.metadata.json b/providers/aws/services/shield/shield_advanced_protection_in_route53_hosted_zones/shield_advanced_protection_in_route53_hosted_zones.metadata.json index aaa0561c..d520268a 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_route53_hosted_zones/shield_advanced_protection_in_route53_hosted_zones.metadata.json +++ b/providers/aws/services/shield/shield_advanced_protection_in_route53_hosted_zones/shield_advanced_protection_in_route53_hosted_zones.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] -} \ No newline at end of file + "Notes": "" +} diff --git a/providers/aws/services/shield/shield_advanced_protection_in_route53_hosted_zones/shield_advanced_protection_in_route53_hosted_zones.py b/providers/aws/services/shield/shield_advanced_protection_in_route53_hosted_zones/shield_advanced_protection_in_route53_hosted_zones.py index 6e60e3ee..31feeb59 100644 --- a/providers/aws/services/shield/shield_advanced_protection_in_route53_hosted_zones/shield_advanced_protection_in_route53_hosted_zones.py +++ b/providers/aws/services/shield/shield_advanced_protection_in_route53_hosted_zones/shield_advanced_protection_in_route53_hosted_zones.py @@ -1,8 +1,6 @@ from lib.check.models import Check, Check_Report +from providers.aws.services.route53.route53_client import route53_client from providers.aws.services.shield.shield_client import shield_client -from providers.aws.services.route53.route53_client import ( - route53_client, -) class shield_advanced_protection_in_route53_hosted_zones(Check): @@ -10,7 +8,7 @@ class shield_advanced_protection_in_route53_hosted_zones(Check): findings = [] if shield_client.enabled: for hosted_zone in route53_client.hosted_zones.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = shield_client.region report.resource_id = hosted_zone.id report.resource_arn = hosted_zone.arn diff --git a/providers/aws/services/sns/sns_topics_kms_encryption_at_rest_enabled/sns_topics_kms_encryption_at_rest_enabled.metadata.json b/providers/aws/services/sns/sns_topics_kms_encryption_at_rest_enabled/sns_topics_kms_encryption_at_rest_enabled.metadata.json index 2ff97411..aa1c9681 100644 --- a/providers/aws/services/sns/sns_topics_kms_encryption_at_rest_enabled/sns_topics_kms_encryption_at_rest_enabled.metadata.json +++ b/providers/aws/services/sns/sns_topics_kms_encryption_at_rest_enabled/sns_topics_kms_encryption_at_rest_enabled.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sns_topics_kms_encryption_at_rest_enabled", - "CheckTitle": "Ensure there are no SNS Topics unencrypted", - "CheckType": [], - "ServiceName": "sns", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sns:region:account-id:topic", - "Severity": "high", - "ResourceType": "AwsSNSTopic", - "Description": "Ensure there are no SNS Topics unencrypted", - "Risk": "If not enabled sensitive information at rest is not protected.", - "RelatedUrl": "https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html", - "Remediation": { - "Code": { - "CLI": "aws sns set-topic-attributes --topic-arn --attribute-name 'KmsMasterKeyId' --attribute-value ", - "NativeIaC": 
"https://docs.bridgecrew.io/docs/general_15#cloudformation", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SNS/topic-encrypted-with-kms-customer-master-keys.html", - "Terraform": "https://docs.bridgecrew.io/docs/general_15#terraform" - }, - "Recommendation": { - "Text": "Use Amazon SNS with AWS KMS.", - "Url": "https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html" - } + "Provider": "aws", + "CheckID": "sns_topics_kms_encryption_at_rest_enabled", + "CheckTitle": "Ensure there are no SNS Topics unencrypted", + "CheckType": [], + "ServiceName": "sns", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sns:region:account-id:topic", + "Severity": "high", + "ResourceType": "AwsSNSTopic", + "Description": "Ensure there are no SNS Topics unencrypted", + "Risk": "If not enabled sensitive information at rest is not protected.", + "RelatedUrl": "https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html", + "Remediation": { + "Code": { + "CLI": "aws sns set-topic-attributes --topic-arn --attribute-name 'KmsMasterKeyId' --attribute-value ", + "NativeIaC": "https://docs.bridgecrew.io/docs/general_15#cloudformation", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SNS/topic-encrypted-with-kms-customer-master-keys.html", + "Terraform": "https://docs.bridgecrew.io/docs/general_15#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Use Amazon SNS with AWS KMS.", + "Url": "https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sns/sns_topics_kms_encryption_at_rest_enabled/sns_topics_kms_encryption_at_rest_enabled.py b/providers/aws/services/sns/sns_topics_kms_encryption_at_rest_enabled/sns_topics_kms_encryption_at_rest_enabled.py index 66e2f26d..f8b73c22 100644 --- a/providers/aws/services/sns/sns_topics_kms_encryption_at_rest_enabled/sns_topics_kms_encryption_at_rest_enabled.py +++ b/providers/aws/services/sns/sns_topics_kms_encryption_at_rest_enabled/sns_topics_kms_encryption_at_rest_enabled.py @@ -6,7 +6,7 @@ class sns_topics_kms_encryption_at_rest_enabled(Check): def execute(self): findings = [] for topic in sns_client.topics: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = topic.region report.resource_id = topic.name report.resource_arn = topic.arn diff --git a/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.metadata.json b/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.metadata.json index b333ed5f..954e412e 100644 --- a/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.metadata.json +++ b/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sns_topics_not_publicly_accessible", - "CheckTitle": "Check if SNS topics have policy set as Public", - "CheckType": [], - "ServiceName": "sns", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sns:region:account-id:topic", - "Severity": "high", - "ResourceType": "AwsSNSTopic", - "Description": "Check if SNS 
topics have policy set as Public", - "Risk": "Publicly accessible services could expose sensitive data to bad actors.", - "RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/sns-topic-policy.html", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SNS/topics-everyone-publish.html", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SNS/topics-everyone-publish.html", - "Terraform": "https://docs.bridgecrew.io/docs/ensure-sns-topic-policy-is-not-public-by-only-allowing-specific-services-or-principals-to-access-it#terraform" - }, - "Recommendation": { - "Text": "Ensure there is a business requirement for service to be public.", - "Url": "https://docs.aws.amazon.com/config/latest/developerguide/sns-topic-policy.html" - } + "Provider": "aws", + "CheckID": "sns_topics_not_publicly_accessible", + "CheckTitle": "Check if SNS topics have policy set as Public", + "CheckType": [], + "ServiceName": "sns", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sns:region:account-id:topic", + "Severity": "high", + "ResourceType": "AwsSNSTopic", + "Description": "Check if SNS topics have policy set as Public", + "Risk": "Publicly accessible services could expose sensitive data to bad actors.", + "RelatedUrl": "https://docs.aws.amazon.com/config/latest/developerguide/sns-topic-policy.html", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SNS/topics-everyone-publish.html", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SNS/topics-everyone-publish.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-sns-topic-policy-is-not-public-by-only-allowing-specific-services-or-principals-to-access-it#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Ensure there is a business requirement for service to be public.", + "Url": "https://docs.aws.amazon.com/config/latest/developerguide/sns-topic-policy.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py b/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py index 9bf7b4d6..30dce67e 100644 --- a/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py +++ b/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py @@ -6,7 +6,7 @@ class sns_topics_not_publicly_accessible(Check): def execute(self): findings = [] for topic in sns_client.topics: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = topic.region report.resource_id = topic.name report.resource_arn = topic.arn diff --git a/providers/aws/services/sqs/sqs_queues_not_publicly_accessible/sqs_queues_not_publicly_accessible.metadata.json b/providers/aws/services/sqs/sqs_queues_not_publicly_accessible/sqs_queues_not_publicly_accessible.metadata.json index 8f0df552..30c5d46b 100644 --- a/providers/aws/services/sqs/sqs_queues_not_publicly_accessible/sqs_queues_not_publicly_accessible.metadata.json +++ 
b/providers/aws/services/sqs/sqs_queues_not_publicly_accessible/sqs_queues_not_publicly_accessible.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sqs_queues_not_publicly_accessible", - "CheckTitle": "Check if SQS queues have policy set as Public", - "CheckType": [], - "ServiceName": "sqs", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sqs:region:account-id:queue", - "Severity": "critical", - "ResourceType": "AwsSqsQueue", - "Description": "Check if SQS queues have policy set as Public", - "Risk": "Sensitive information could be disclosed", - "RelatedUrl": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-basic-examples-of-sqs-policies.html", - "Remediation": { - "Code": { - "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SQS/sqs-queue-exposed.html", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SQS/sqs-queue-exposed.html", - "Terraform": "https://docs.bridgecrew.io/docs/ensure-sqs-queue-policy-is-not-public-by-only-allowing-specific-services-or-principals-to-access-it#terraform" - }, - "Recommendation": { - "Text": "Review service with overly permissive policies. Adhere to Principle of Least Privilege.", - "Url": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-basic-examples-of-sqs-policies.html" - } + "Provider": "aws", + "CheckID": "sqs_queues_not_publicly_accessible", + "CheckTitle": "Check if SQS queues have policy set as Public", + "CheckType": [], + "ServiceName": "sqs", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sqs:region:account-id:queue", + "Severity": "critical", + "ResourceType": "AwsSqsQueue", + "Description": "Check if SQS queues have policy set as Public", + "Risk": "Sensitive information could be disclosed", + "RelatedUrl": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-basic-examples-of-sqs-policies.html", + "Remediation": { + "Code": { + "CLI": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SQS/sqs-queue-exposed.html", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SQS/sqs-queue-exposed.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-sqs-queue-policy-is-not-public-by-only-allowing-specific-services-or-principals-to-access-it#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Review service with overly permissive policies. 
Adhere to Principle of Least Privilege.", + "Url": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-basic-examples-of-sqs-policies.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sqs/sqs_queues_not_publicly_accessible/sqs_queues_not_publicly_accessible.py b/providers/aws/services/sqs/sqs_queues_not_publicly_accessible/sqs_queues_not_publicly_accessible.py index b36699a2..d659e645 100644 --- a/providers/aws/services/sqs/sqs_queues_not_publicly_accessible/sqs_queues_not_publicly_accessible.py +++ b/providers/aws/services/sqs/sqs_queues_not_publicly_accessible/sqs_queues_not_publicly_accessible.py @@ -6,7 +6,7 @@ class sqs_queues_not_publicly_accessible(Check): def execute(self): findings = [] for queue in sqs_client.queues: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = queue.region report.resource_id = queue.id report.resource_arn = queue.arn diff --git a/providers/aws/services/sqs/sqs_queues_server_side_encryption_enabled/sqs_queues_server_side_encryption_enabled.metadata.json b/providers/aws/services/sqs/sqs_queues_server_side_encryption_enabled/sqs_queues_server_side_encryption_enabled.metadata.json index 84a82d64..e6f1c8d5 100644 --- a/providers/aws/services/sqs/sqs_queues_server_side_encryption_enabled/sqs_queues_server_side_encryption_enabled.metadata.json +++ b/providers/aws/services/sqs/sqs_queues_server_side_encryption_enabled/sqs_queues_server_side_encryption_enabled.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "sqs_queues_server_side_encryption_enabled", - "CheckTitle": "Check if SQS queues have Server Side Encryption enabled", - "CheckType": [], - "ServiceName": "sqs", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:sqs:region:account-id:queue", - "Severity": "medium", - "ResourceType": "AwsSqsQueue", - "Description": "Check if SQS queues have Server Side Encryption enabled", - "Risk": "If not enabled sensitive information in transit is not protected.", - "RelatedUrl": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html", - "Remediation": { - "Code": { - "CLI": "aws sqs set-queue-attributes --queue-url --attributes KmsMasterKeyId=", - "NativeIaC": "https://docs.bridgecrew.io/docs/general_16-encrypt-sqs-queue#cloudformation", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SQS/queue-encrypted-with-kms-customer-master-keys.html", - "Terraform": "https://docs.bridgecrew.io/docs/general_16-encrypt-sqs-queue#terraform" - }, - "Recommendation": { - "Text": "Enable Encryption. Use a CMK where possible. 
It will provide additional management and privacy benefits", - "Url": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html" - } + "Provider": "aws", + "CheckID": "sqs_queues_server_side_encryption_enabled", + "CheckTitle": "Check if SQS queues have Server Side Encryption enabled", + "CheckType": [], + "ServiceName": "sqs", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:sqs:region:account-id:queue", + "Severity": "medium", + "ResourceType": "AwsSqsQueue", + "Description": "Check if SQS queues have Server Side Encryption enabled", + "Risk": "If not enabled sensitive information in transit is not protected.", + "RelatedUrl": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html", + "Remediation": { + "Code": { + "CLI": "aws sqs set-queue-attributes --queue-url --attributes KmsMasterKeyId=", + "NativeIaC": "https://docs.bridgecrew.io/docs/general_16-encrypt-sqs-queue#cloudformation", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/SQS/queue-encrypted-with-kms-customer-master-keys.html", + "Terraform": "https://docs.bridgecrew.io/docs/general_16-encrypt-sqs-queue#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Enable Encryption. Use a CMK where possible. It will provide additional management and privacy benefits", + "Url": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/sqs/sqs_queues_server_side_encryption_enabled/sqs_queues_server_side_encryption_enabled.py b/providers/aws/services/sqs/sqs_queues_server_side_encryption_enabled/sqs_queues_server_side_encryption_enabled.py index 9bf57a74..e11a76b6 100644 --- a/providers/aws/services/sqs/sqs_queues_server_side_encryption_enabled/sqs_queues_server_side_encryption_enabled.py +++ b/providers/aws/services/sqs/sqs_queues_server_side_encryption_enabled/sqs_queues_server_side_encryption_enabled.py @@ -6,7 +6,7 @@ class sqs_queues_server_side_encryption_enabled(Check): def execute(self): findings = [] for queue in sqs_client.queues: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = queue.region report.resource_id = queue.id report.resource_arn = queue.arn diff --git a/providers/aws/services/sqs/sqs_service.py b/providers/aws/services/sqs/sqs_service.py index 5af7b40d..b74c98c1 100644 --- a/providers/aws/services/sqs/sqs_service.py +++ b/providers/aws/services/sqs/sqs_service.py @@ -34,13 +34,14 @@ class SQS: try: list_queues_paginator = regional_client.get_paginator("list_queues") for page in list_queues_paginator.paginate(): - for queue in page["QueueUrls"]: - self.queues.append( - Queue( - id=queue, - region=regional_client.region, + if "QueueUrls" in page: + for queue in page["QueueUrls"]: + self.queues.append( + Queue( + id=queue, + region=regional_client.region, + ) ) - ) except Exception as error: logger.error( f"{regional_client.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" diff --git a/providers/aws/services/ssm/ssm_document_secrets/ssm_document_secrets.metadata.json 
b/providers/aws/services/ssm/ssm_document_secrets/ssm_document_secrets.metadata.json index a0759c87..8f6d66e6 100644 --- a/providers/aws/services/ssm/ssm_document_secrets/ssm_document_secrets.metadata.json +++ b/providers/aws/services/ssm/ssm_document_secrets/ssm_document_secrets.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ssm/ssm_document_secrets/ssm_document_secrets.py b/providers/aws/services/ssm/ssm_document_secrets/ssm_document_secrets.py index ee1cd76b..f0fe64e2 100644 --- a/providers/aws/services/ssm/ssm_document_secrets/ssm_document_secrets.py +++ b/providers/aws/services/ssm/ssm_document_secrets/ssm_document_secrets.py @@ -13,7 +13,7 @@ class ssm_document_secrets(Check): def execute(self): findings = [] for document in ssm_client.documents.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = document.region report.resource_arn = f"arn:aws:ssm:{document.region}:{ssm_client.audited_account}:document/{document.name}" report.resource_id = document.name diff --git a/providers/aws/services/ssm/ssm_documents_set_as_public/ssm_documents_set_as_public.metadata.json b/providers/aws/services/ssm/ssm_documents_set_as_public/ssm_documents_set_as_public.metadata.json index 5fd61486..89b52ce8 100644 --- a/providers/aws/services/ssm/ssm_documents_set_as_public/ssm_documents_set_as_public.metadata.json +++ b/providers/aws/services/ssm/ssm_documents_set_as_public/ssm_documents_set_as_public.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ssm/ssm_documents_set_as_public/ssm_documents_set_as_public.py b/providers/aws/services/ssm/ssm_documents_set_as_public/ssm_documents_set_as_public.py index 36223df5..ac984d60 100644 --- a/providers/aws/services/ssm/ssm_documents_set_as_public/ssm_documents_set_as_public.py +++ b/providers/aws/services/ssm/ssm_documents_set_as_public/ssm_documents_set_as_public.py @@ -6,7 +6,7 @@ class ssm_documents_set_as_public(Check): def execute(self): findings = [] for document in ssm_client.documents.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = document.region report.resource_arn = f"arn:aws:ssm:{document.region}:{ssm_client.audited_account}:document/{document.name}" report.resource_id = document.name diff --git a/providers/aws/services/ssm/ssm_managed_compliant_patching/ssm_managed_compliant_patching.metadata.json b/providers/aws/services/ssm/ssm_managed_compliant_patching/ssm_managed_compliant_patching.metadata.json index 2ac20f45..44d5bd2c 100644 --- a/providers/aws/services/ssm/ssm_managed_compliant_patching/ssm_managed_compliant_patching.metadata.json +++ b/providers/aws/services/ssm/ssm_managed_compliant_patching/ssm_managed_compliant_patching.metadata.json @@ -30,6 +30,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/ssm/ssm_managed_compliant_patching/ssm_managed_compliant_patching.py b/providers/aws/services/ssm/ssm_managed_compliant_patching/ssm_managed_compliant_patching.py index e865ac8b..81a98104 100644 --- a/providers/aws/services/ssm/ssm_managed_compliant_patching/ssm_managed_compliant_patching.py +++ b/providers/aws/services/ssm/ssm_managed_compliant_patching/ssm_managed_compliant_patching.py @@ -7,7 +7,7 @@ class ssm_managed_compliant_patching(Check): def 
execute(self): findings = [] for resource in ssm_client.compliance_resources.values(): - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = resource.region report.resource_arn = f"arn:aws:ec2:{resource.region}:{ssm_client.audited_account}:instance/{resource.id}" report.resource_id = resource.id diff --git a/providers/aws/services/trustedadvisor/trustedadvisor_errors_and_warnings/trustedadvisor_errors_and_warnings.metadata.json b/providers/aws/services/trustedadvisor/trustedadvisor_errors_and_warnings/trustedadvisor_errors_and_warnings.metadata.json index d2a282bd..99c12cb6 100644 --- a/providers/aws/services/trustedadvisor/trustedadvisor_errors_and_warnings/trustedadvisor_errors_and_warnings.metadata.json +++ b/providers/aws/services/trustedadvisor/trustedadvisor_errors_and_warnings/trustedadvisor_errors_and_warnings.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "trustedadvisor_errors_and_warnings", - "CheckTitle": "Check Trusted Advisor for errors and warnings.", - "CheckType": [], - "ServiceName": "trustedadvisor", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:service:region:account-id", - "Severity": "medium", - "ResourceType": "Other", - "Description": "Check Trusted Advisor for errors and warnings.", - "Risk": "Improve the security of your application by closing gaps, enabling various AWS security features and examining your permissions.", - "RelatedUrl": "https://aws.amazon.com/premiumsupport/technology/trusted-advisor/best-practice-checklist/", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/TrustedAdvisor/checks.html", - "Terraform": "" - }, - "Recommendation": { - "Text": "Review and act upon its recommendations.", - "Url": "https://aws.amazon.com/premiumsupport/technology/trusted-advisor/best-practice-checklist/" - } + "Provider": "aws", + "CheckID": "trustedadvisor_errors_and_warnings", + "CheckTitle": "Check Trusted Advisor for errors and warnings.", + "CheckType": [], + "ServiceName": "trustedadvisor", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:service:region:account-id", + "Severity": "medium", + "ResourceType": "Other", + "Description": "Check Trusted Advisor for errors and warnings.", + "Risk": "Improve the security of your application by closing gaps, enabling various AWS security features and examining your permissions.", + "RelatedUrl": "https://aws.amazon.com/premiumsupport/technology/trusted-advisor/best-practice-checklist/", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/TrustedAdvisor/checks.html", + "Terraform": "" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "Review and act upon its recommendations.", + "Url": "https://aws.amazon.com/premiumsupport/technology/trusted-advisor/best-practice-checklist/" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/trustedadvisor/trustedadvisor_errors_and_warnings/trustedadvisor_errors_and_warnings.py b/providers/aws/services/trustedadvisor/trustedadvisor_errors_and_warnings/trustedadvisor_errors_and_warnings.py index 97e53e51..b80c4d03 100644 --- 
a/providers/aws/services/trustedadvisor/trustedadvisor_errors_and_warnings/trustedadvisor_errors_and_warnings.py +++ b/providers/aws/services/trustedadvisor/trustedadvisor_errors_and_warnings/trustedadvisor_errors_and_warnings.py @@ -9,7 +9,7 @@ class trustedadvisor_errors_and_warnings(Check): findings = [] if trustedadvisor_client.checks: for check in trustedadvisor_client.checks: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = check.region report.resource_id = check.id report.status = "FAIL" diff --git a/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.metadata.json b/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.metadata.json index 16ac8d2d..fa2c6a53 100644 --- a/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.metadata.json +++ b/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "vpc_endpoint_connections_trust_boundaries", "CheckTitle": "Find trust boundaries in VPC endpoint connections.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "vpc", "SubServiceName": "endpoint", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.py b/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.py index 2b990908..5513df5d 100644 --- a/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.py +++ b/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.py @@ -12,7 +12,7 @@ class vpc_endpoint_connections_trust_boundaries(Check): # Check VPC endpoint policy for statement in endpoint.policy_document["Statement"]: if "*" == statement["Principal"]: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = endpoint.region report.status = "FAIL" report.status_extended = f"VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id} has full access." 
@@ -27,7 +27,7 @@ class vpc_endpoint_connections_trust_boundaries(Check): principals = statement["Principal"]["AWS"] for principal_arn in principals: account_id = principal_arn.split(":")[4] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = endpoint.region if ( account_id in trusted_account_ids diff --git a/providers/aws/services/vpc/vpc_endpoint_services_allowed_principals_trust_boundaries/vpc_endpoint_services_allowed_principals_trust_boundaries.metadata.json b/providers/aws/services/vpc/vpc_endpoint_services_allowed_principals_trust_boundaries/vpc_endpoint_services_allowed_principals_trust_boundaries.metadata.json index d8886635..c2009bd4 100644 --- a/providers/aws/services/vpc/vpc_endpoint_services_allowed_principals_trust_boundaries/vpc_endpoint_services_allowed_principals_trust_boundaries.metadata.json +++ b/providers/aws/services/vpc/vpc_endpoint_services_allowed_principals_trust_boundaries/vpc_endpoint_services_allowed_principals_trust_boundaries.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "vpc_endpoint_services_allowed_principals_trust_boundaries", "CheckTitle": "Find trust boundaries in VPC endpoint services allowlisted principles.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "vpc", "SubServiceName": "service_endpoint", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/vpc/vpc_endpoint_services_allowed_principals_trust_boundaries/vpc_endpoint_services_allowed_principals_trust_boundaries.py b/providers/aws/services/vpc/vpc_endpoint_services_allowed_principals_trust_boundaries/vpc_endpoint_services_allowed_principals_trust_boundaries.py index 66d5c7d8..f4e9f6f8 100644 --- a/providers/aws/services/vpc/vpc_endpoint_services_allowed_principals_trust_boundaries/vpc_endpoint_services_allowed_principals_trust_boundaries.py +++ b/providers/aws/services/vpc/vpc_endpoint_services_allowed_principals_trust_boundaries/vpc_endpoint_services_allowed_principals_trust_boundaries.py @@ -10,7 +10,7 @@ class vpc_endpoint_services_allowed_principals_trust_boundaries(Check): trusted_account_ids = get_config_var("trusted_account_ids") for service in vpc_client.vpc_endpoint_services: if not service.allowed_principals: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = service.region report.status = "PASS" report.status_extended = ( @@ -21,7 +21,7 @@ class vpc_endpoint_services_allowed_principals_trust_boundaries(Check): else: for principal in service.allowed_principals: account_id = principal.split(":")[4] - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = service.region if ( account_id in trusted_account_ids diff --git a/providers/aws/services/vpc/vpc_flow_logs_enabled/vpc_flow_logs_enabled.metadata.json b/providers/aws/services/vpc/vpc_flow_logs_enabled/vpc_flow_logs_enabled.metadata.json index 0bcf0e08..b34a944f 100644 --- a/providers/aws/services/vpc/vpc_flow_logs_enabled/vpc_flow_logs_enabled.metadata.json +++ b/providers/aws/services/vpc/vpc_flow_logs_enabled/vpc_flow_logs_enabled.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "vpc_flow_logs_enabled", "CheckTitle": "Ensure VPC Flow Logging is Enabled in all VPCs.", - "CheckType": ["Logging and Monitoring"], + "CheckType": [ + "Logging and Monitoring" + ], 
"ServiceName": "vpc", "SubServiceName": "flow_log", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/vpc/vpc_flow_logs_enabled/vpc_flow_logs_enabled.py b/providers/aws/services/vpc/vpc_flow_logs_enabled/vpc_flow_logs_enabled.py index 2929a05f..e12e7b2a 100644 --- a/providers/aws/services/vpc/vpc_flow_logs_enabled/vpc_flow_logs_enabled.py +++ b/providers/aws/services/vpc/vpc_flow_logs_enabled/vpc_flow_logs_enabled.py @@ -6,7 +6,7 @@ class vpc_flow_logs_enabled(Check): def execute(self): findings = [] for vpc in vpc_client.vpcs: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = vpc.region if vpc.flow_log: report.status = "PASS" diff --git a/providers/aws/services/vpc/vpc_peering_routing_tables_with_least_privilege/vpc_peering_routing_tables_with_least_privilege.metadata.json b/providers/aws/services/vpc/vpc_peering_routing_tables_with_least_privilege/vpc_peering_routing_tables_with_least_privilege.metadata.json index 947e4cfa..f6fdecb7 100644 --- a/providers/aws/services/vpc/vpc_peering_routing_tables_with_least_privilege/vpc_peering_routing_tables_with_least_privilege.metadata.json +++ b/providers/aws/services/vpc/vpc_peering_routing_tables_with_least_privilege/vpc_peering_routing_tables_with_least_privilege.metadata.json @@ -2,7 +2,9 @@ "Provider": "aws", "CheckID": "vpc_peering_routing_tables_with_least_privilege", "CheckTitle": "Ensure routing tables for VPC peering are least access.", - "CheckType": ["Infrastructure Security"], + "CheckType": [ + "Infrastructure Security" + ], "ServiceName": "vpc", "SubServiceName": "route_table", "ResourceIdTemplate": "arn:partition:service:region:account-id:resource-id", @@ -30,6 +32,5 @@ }, "DependsOn": [], "RelatedTo": [], - "Notes": "", - "Compliance": [] + "Notes": "" } diff --git a/providers/aws/services/vpc/vpc_peering_routing_tables_with_least_privilege/vpc_peering_routing_tables_with_least_privilege.py b/providers/aws/services/vpc/vpc_peering_routing_tables_with_least_privilege/vpc_peering_routing_tables_with_least_privilege.py index 534bcc5f..72e04ed8 100644 --- a/providers/aws/services/vpc/vpc_peering_routing_tables_with_least_privilege/vpc_peering_routing_tables_with_least_privilege.py +++ b/providers/aws/services/vpc/vpc_peering_routing_tables_with_least_privilege/vpc_peering_routing_tables_with_least_privilege.py @@ -6,7 +6,7 @@ class vpc_peering_routing_tables_with_least_privilege(Check): def execute(self): findings = [] for peer in vpc_client.vpc_peering_connections: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = peer.region comply = True # Check each cidr in the peering route table diff --git a/providers/aws/services/workspaces/workspaces_volume_encryption_enabled/workspaces_volume_encryption_enabled.metadata.json b/providers/aws/services/workspaces/workspaces_volume_encryption_enabled/workspaces_volume_encryption_enabled.metadata.json index e8b37123..db207859 100644 --- a/providers/aws/services/workspaces/workspaces_volume_encryption_enabled/workspaces_volume_encryption_enabled.metadata.json +++ b/providers/aws/services/workspaces/workspaces_volume_encryption_enabled/workspaces_volume_encryption_enabled.metadata.json @@ -1,35 +1,34 @@ { - "Provider": "aws", - "CheckID": "workspaces_volume_encryption_enabled", - "CheckTitle": "Ensure that your Amazon WorkSpaces storage volumes 
are encrypted in order to meet security and compliance requirements", - "CheckType": [], - "ServiceName": "workspaces", - "SubServiceName": "", - "ResourceIdTemplate": "arn:aws:workspaces:region:account-id:workspace", - "Severity": "high", - "ResourceType": "AwsWorkspaces", - "Description": "Ensure that your Amazon WorkSpaces storage volumes are encrypted in order to meet security and compliance requirements", - "Risk": "If the value listed in the Volume Encryption column is Disabled the selected AWS WorkSpaces instance volumes (root and user volumes) are not encrypted. Therefore your data-at-rest is not protected from unauthorized access and does not meet the compliance requirements regarding data encryption.", - "RelatedUrl": "https://docs.aws.amazon.com/workspaces/latest/adminguide/encrypt-workspaces.html", - "Remediation": { - "Code": { - "CLI": "", - "NativeIaC": "https://docs.bridgecrew.io/docs/ensure-that-workspace-root-volumes-are-encrypted#cloudformation", - "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/WorkSpaces/storage-encryption.html", - "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-workspace-root-volumes-are-encrypted#terraform" - }, - "Recommendation": { - "Text": "WorkSpaces is integrated with the AWS Key Management Service (AWS KMS). This enables you to encrypt storage volumes of WorkSpaces using AWS KMS Key. When you launch a WorkSpace you can encrypt the root volume (for Microsoft Windows - the C drive; for Linux - /) and the user volume (for Windows - the D drive; for Linux - /home). Doing so ensures that the data stored at rest - disk I/O to the volume - and snapshots created from the volumes are all encrypted", - "Url": "https://docs.aws.amazon.com/workspaces/latest/adminguide/encrypt-workspaces.html" - } + "Provider": "aws", + "CheckID": "workspaces_volume_encryption_enabled", + "CheckTitle": "Ensure that your Amazon WorkSpaces storage volumes are encrypted in order to meet security and compliance requirements", + "CheckType": [], + "ServiceName": "workspaces", + "SubServiceName": "", + "ResourceIdTemplate": "arn:aws:workspaces:region:account-id:workspace", + "Severity": "high", + "ResourceType": "AwsWorkspaces", + "Description": "Ensure that your Amazon WorkSpaces storage volumes are encrypted in order to meet security and compliance requirements", + "Risk": "If the value listed in the Volume Encryption column is Disabled the selected AWS WorkSpaces instance volumes (root and user volumes) are not encrypted. Therefore your data-at-rest is not protected from unauthorized access and does not meet the compliance requirements regarding data encryption.", + "RelatedUrl": "https://docs.aws.amazon.com/workspaces/latest/adminguide/encrypt-workspaces.html", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "https://docs.bridgecrew.io/docs/ensure-that-workspace-root-volumes-are-encrypted#cloudformation", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/WorkSpaces/storage-encryption.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-workspace-root-volumes-are-encrypted#terraform" }, - "Categories": [], - "Tags": { - "Tag1Key": "value", - "Tag2Key": "value" - }, - "DependsOn": [], - "RelatedTo": [], - "Notes": "", - "Compliance": [] - } + "Recommendation": { + "Text": "WorkSpaces is integrated with the AWS Key Management Service (AWS KMS). This enables you to encrypt storage volumes of WorkSpaces using AWS KMS Key. 
When you launch a WorkSpace you can encrypt the root volume (for Microsoft Windows - the C drive; for Linux - /) and the user volume (for Windows - the D drive; for Linux - /home). Doing so ensures that the data stored at rest - disk I/O to the volume - and snapshots created from the volumes are all encrypted", + "Url": "https://docs.aws.amazon.com/workspaces/latest/adminguide/encrypt-workspaces.html" + } + }, + "Categories": [], + "Tags": { + "Tag1Key": "value", + "Tag2Key": "value" + }, + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/providers/aws/services/workspaces/workspaces_volume_encryption_enabled/workspaces_volume_encryption_enabled.py b/providers/aws/services/workspaces/workspaces_volume_encryption_enabled/workspaces_volume_encryption_enabled.py index 93ae070b..c4095db6 100644 --- a/providers/aws/services/workspaces/workspaces_volume_encryption_enabled/workspaces_volume_encryption_enabled.py +++ b/providers/aws/services/workspaces/workspaces_volume_encryption_enabled/workspaces_volume_encryption_enabled.py @@ -6,7 +6,7 @@ class workspaces_volume_encryption_enabled(Check): def execute(self): findings = [] for workspace in workspaces_client.workspaces: - report = Check_Report(self.metadata) + report = Check_Report(self.metadata()) report.region = workspace.region report.resource_id = workspace.id report.resource_arn = workspace.arn diff --git a/prowler b/prowler index 03209b5f..f304a17c 100755 --- a/prowler +++ b/prowler @@ -14,6 +14,7 @@ from config.config import ( from lib.banner import print_banner, print_version from lib.check.check import ( bulk_load_checks_metadata, + bulk_load_compliance_frameworks, exclude_checks_to_run, exclude_groups_to_run, exclude_services_to_run, @@ -21,12 +22,20 @@ from lib.check.check import ( list_groups, list_services, print_checks, + print_compliance_frameworks, + print_compliance_requirements, print_services, set_output_options, ) from lib.check.checks_loader import load_checks_to_execute +from lib.check.compliance import update_checks_metadata_with_compliance from lib.logger import logger, set_logging_config -from lib.outputs.outputs import close_json, display_summary_table, send_to_s3_bucket +from lib.outputs.outputs import ( + close_json, + display_compliance_table, + display_summary_table, + send_to_s3_bucket, +) from providers.aws.aws_provider import provider_set_session from providers.aws.lib.allowlist.allowlist import parse_allowlist_file from providers.aws.lib.security_hub.security_hub import ( @@ -57,6 +66,12 @@ if __name__ == "__main__": help="List of severities [informational, low, medium, high, critical]", choices=["informational", "low", "medium", "high", "critical"], ) + group.add_argument( + "--compliance", + nargs="+", + help="Compliance Framework to check against for. 
The format should be the following: framework_version_provider (e.g.: ens_rd2022_aws)", + choices=["ens_rd2022_aws"], + ) # Exclude checks options parser.add_argument("-e", "--excluded-checks", nargs="+", help="Checks to exclude") parser.add_argument("-E", "--excluded-groups", nargs="+", help="Groups to exclude") @@ -72,7 +87,14 @@ if __name__ == "__main__": list_group.add_argument( "--list-services", action="store_true", help="List services" ) - + list_group.add_argument( + "--list-compliance", action="store_true", help="List compliance frameworks" + ) + list_group.add_argument( + "--list-compliance-requirements", + nargs="?", + help="List compliance requirements for a given compliance framework", + ) parser.add_argument( "-b", "--no-banner", action="store_false", help="Hide Prowler banner" ) @@ -213,7 +235,11 @@ if __name__ == "__main__": output_directory = args.output_directory output_filename = args.output_filename severities = args.severity + compliance_framework = args.compliance output_modes = args.output_modes + # We treat the compliance framework as another output format + if compliance_framework: + output_modes.extend(compliance_framework) # Set Logger configuration set_logging_config(args.log_file, args.log_level) @@ -248,17 +274,42 @@ if __name__ == "__main__": # Load checks metadata logger.debug("Loading checks metadata from .metadata.json files") bulk_checks_metadata = bulk_load_checks_metadata(provider) + bulk_compliance_frameworks = {} + # Load compliance frameworks + logger.debug("Loading compliance frameworks from .json files") + + # Load the compliance framework if specified with --compliance + # If some compliance argument is specified we have to load it + if ( + args.list_compliance + or args.list_compliance_requirements + or compliance_framework + ): + bulk_compliance_frameworks = bulk_load_compliance_frameworks(provider) + # Complete checks metadata with the compliance framework specification + update_checks_metadata_with_compliance( + bulk_compliance_frameworks, bulk_checks_metadata + ) + if args.list_compliance: + print_compliance_frameworks(bulk_compliance_frameworks) + sys.exit() + if args.list_compliance_requirements: + print_compliance_requirements(bulk_compliance_frameworks) + sys.exit() # Load checks to execute checks_to_execute = load_checks_to_execute( bulk_checks_metadata, + bulk_compliance_frameworks, checks_file, checks, services, groups, severities, + compliance_framework, provider, ) + # Exclude checks if -e/--excluded-checks if excluded_checks: checks_to_execute = exclude_checks_to_run(checks_to_execute, excluded_checks) @@ -326,6 +377,7 @@ if __name__ == "__main__": args.security_hub, output_filename, allowlist_file, + bulk_checks_metadata, args.verbose, ) @@ -365,10 +417,21 @@ if __name__ == "__main__": if args.security_hub: resolve_security_hub_previous_findings(output_directory, audit_info) - # Display summary table - display_summary_table( - findings, - audit_info, - output_filename, - output_directory, - ) + if findings: + # Display summary table + display_summary_table( + findings, + audit_info, + output_filename, + output_directory, + ) + + if compliance_framework: + # Display compliance table + display_compliance_table( + findings, + bulk_checks_metadata, + compliance_framework, + output_filename, + output_directory, + )
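Taken together, the prowler entrypoint changes add three user-facing flags: --list-compliance, --list-compliance-requirements and --compliance, whose only registered choice in this patch is ens_rd2022_aws. The selected framework is folded into output_modes so that downstream output handling can treat it like any other output format, and display_compliance_table is invoked at the end when a framework was requested. Below is a small, self-contained sketch of that wiring with hard-coded stand-ins for the parsed arguments; the concrete output modes and the exact command line (beyond the flags defined in the hunk above) are illustrative assumptions, not taken from this patch.

# Stand-ins for args.compliance and args.output_modes after argument parsing,
# e.g. for an invocation roughly like: ./prowler --compliance ens_rd2022_aws
compliance_framework = ["ens_rd2022_aws"]  # args.compliance (nargs="+")
output_modes = ["csv", "json"]  # illustrative; the real values come from the CLI

# Mirrors the patch: the compliance framework is appended as another output format.
if compliance_framework:
    output_modes.extend(compliance_framework)

print(output_modes)  # ['csv', 'json', 'ens_rd2022_aws']

Appending the framework name to output_modes appears to be what lets the rest of the execution path stay unchanged: only the outputs layer and the final display_compliance_table call need to know that a compliance report was requested.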