diff --git a/README.md b/README.md index f02dc504..fb453d30 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ It contains hundreds of controls covering CIS, NIST 800, NIST CSF, CISA, RBI, Fe | Provider | Checks | Services | [Compliance Frameworks](https://docs.prowler.cloud/en/latest/tutorials/compliance/) | [Categories](https://docs.prowler.cloud/en/latest/tutorials/misc/#categories) | |---|---|---|---|---| | AWS | 283 | 55 -> `prowler aws --list-services` | 21 -> `prowler aws --list-compliance` | 5 -> `prowler aws --list-categories` | -| GCP | 59 | 10 -> `prowler gcp --list-services` | CIS soon | 0 -> `prowler gcp --list-categories`| +| GCP | 73 | 11 -> `prowler gcp --list-services` | 1 -> `prowler gcp --list-compliance` | 0 -> `prowler gcp --list-categories`| | Azure | 20 | 3 -> `prowler azure --list-services` | CIS soon | 1 -> `prowler azure --list-categories` | | Kubernetes | Planned | - | - | - | diff --git a/prowler/compliance/gcp/cis_2.0_gcp.json b/prowler/compliance/gcp/cis_2.0_gcp.json new file mode 100644 index 00000000..bf11b8f4 --- /dev/null +++ b/prowler/compliance/gcp/cis_2.0_gcp.json @@ -0,0 +1,1750 @@ +{ + "Framework": "CIS", + "Version": "2.0", + "Provider": "GCP", + "Description": "This CIS Benchmark is the product of a community consensus process and consists of secure configuration guidelines developed for Google Cloud Computing Platform", + "Requirements": [ + { + "Id": "1.1", + "Description": "Use corporate login credentials instead of personal accounts, such as Gmail accounts.", + "Checks": [], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Use corporate login credentials instead of personal accounts, such as Gmail accounts.", + "RationaleStatement": "It is recommended fully-managed corporate Google accounts be used for increased visibility, auditing, and controlling access to Cloud Platform resources. Email accounts based outside of the user's organization, such as personal accounts, should not be used for business purposes.", + "ImpactStatement": "There will be increased overhead as maintaining accounts will now be required. For smaller organizations, this will not be an issue, but will balloon with size.", + "RemediationProcedure": "Follow the documentation and setup corporate login accounts.\n\n**Prevention:**\nTo ensure that no email addresses outside the organization can be granted IAM permissions to its Google Cloud projects, folders or organization, turn on the Organization Policy for `Domain Restricted Sharing`. Learn more at: https://cloud.google.com/resource-manager/docs/organization-policy/restricting-domains(https://cloud.google.com/resource-manager/docs/organization-policy/restricting-domains)", + "AuditProcedure": "For each Google Cloud Platform project, list the accounts that have been granted access to that project:\n\n**From Google Cloud CLI**\n\n```\ngcloud projects get-iam-policy PROJECT_ID\n```\n\nAlso list the accounts added on each folder: \n\n```\ngcloud resource-manager folders get-iam-policy FOLDER_ID \n```\n\nAnd list your organization's IAM policy: \n\n```\ngcloud organizations get-iam-policy ORGANIZATION_ID\n```\n\nNo email accounts outside the organization domain should be granted permissions in the IAM policies. 
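As a rough illustration, this review can be scripted. The sketch below is a minimal helper, assuming a hypothetical project ID and corporate domain; it lists unique IAM members and filters out Google-owned service accounts so that only external identities remain for review:

```
#!/usr/bin/env bash
# Hypothetical audit helper: surface IAM members outside the corporate domain.
PROJECT_ID="my-project"      # placeholder: the project to audit
CORP_DOMAIN="example.com"    # placeholder: your corporate domain

gcloud projects get-iam-policy "$PROJECT_ID" \
  --flatten="bindings[].members" \
  --format="value(bindings.members)" |
  sort -u |
  grep -v "gserviceaccount.com" |      # Google-owned service accounts are expected
  grep -v "@${CORP_DOMAIN}$" || true   # anything printed here warrants review
```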
This excludes Google-owned service accounts.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/docs/enterprise/best-practices-for-enterprise-organizations#manage-identities:https://support.google.com/work/android/answer/6371476:https://cloud.google.com/sdk/gcloud/reference/organizations/get-iam-policy:https://cloud.google.com/sdk/gcloud/reference/beta/resource-manager/folders/get-iam-policy:https://cloud.google.com/sdk/gcloud/reference/projects/get-iam-policy:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints:https://cloud.google.com/resource-manager/docs/organization-policy/restricting-domains" + } + ] + }, + { + "Id": "1.2", + "Description": "Setup multi-factor authentication for Google Cloud Platform accounts.", + "Checks": [], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Setup multi-factor authentication for Google Cloud Platform accounts.", + "RationaleStatement": "Multi-factor authentication requires more than one mechanism to authenticate a user. This secures user logins from attackers exploiting stolen or weak credentials.", + "ImpactStatement": "", + "RemediationProcedure": "**From Google Cloud Console**\n\nFor each Google Cloud Platform project:\n\n1. Identify non-service accounts.\n\n1. Setup multi-factor authentication for each account.", + "AuditProcedure": "**From Google Cloud Console**\n\nFor each Google Cloud Platform project, folder, or organization:\n\n1. Identify non-service accounts.\n\n1. Manually verify that multi-factor authentication for each account is set.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/solutions/securing-gcp-account-u2f:https://support.google.com/accounts/answer/185839" + } + ] + }, + { + "Id": "1.3", + "Description": "Setup Security Key Enforcement for Google Cloud Platform admin accounts.", + "Checks": [], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Setup Security Key Enforcement for Google Cloud Platform admin accounts.", + "RationaleStatement": "Google Cloud Platform users with Organization Administrator roles have the highest level of privilege in the organization. These accounts should be protected with the strongest form of two-factor authentication: Security Key Enforcement. Ensure that admins use Security Keys to log in instead of weaker second factors like SMS or one-time passwords (OTP). Security Keys are actual physical keys used to access Google Organization Administrator Accounts. They send an encrypted signature rather than a code, ensuring that logins cannot be phished.", + "ImpactStatement": "If an organization administrator loses access to their security key, the user could lose access to their account. For this reason, it is important to set up backup security keys.", + "RemediationProcedure": "1. Identify users with the Organization Administrator role.\n\n2. Setup Security Key Enforcement for each account. Learn more at: https://cloud.google.com/security-key/(https://cloud.google.com/security-key/)", + "AuditProcedure": "1. Identify users with Organization Administrator privileges:\n\n```\ngcloud organizations get-iam-policy ORGANIZATION_ID\n```\n\nLook for members granted the role \"roles/resourcemanager.organizationAdmin\".\n\n2. 
Manually verify that Security Key Enforcement has been enabled for each account.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/security-key/:https://gsuite.google.com/learn-more/key_for_working_smarter_faster_and_more_securely.html" + } + ] + }, + { + "Id": "1.14", + "Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. API keys are always at risk because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to restrict API keys to use (call) only APIs required by an application.", + "Checks": [ + "apikeys_api_restrictions_configured" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. API keys are always at risk because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to restrict API keys to use (call) only APIs required by an application.", + "RationaleStatement": "Security risks involved in using API-Keys are below:\n\n- API keys are simple encrypted strings\n\n- API keys do not identify the user or the application making the API request\n\n- API keys are typically accessible to clients, making it easy to discover and steal an API key\n\nIn light of these potential risks, Google recommends using the standard authentication flow instead of API-Keys. However, there are limited cases where API keys are more appropriate. For example, if there is a mobile application that needs to use the Google Cloud Translation API, but doesn't otherwise need a backend server, API keys are the simplest way to authenticate to that API.\n\nIn order to reduce attack surfaces by providing `least privileges`, API-Keys can be restricted to use (call) only APIs required by an application.", + "ImpactStatement": "Setting `API restrictions` may break existing application functioning, if not done carefully.", + "RemediationProcedure": "**From Console:**\n\n1. Go to `APIs & Services\\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n2. In the section `API Keys`, Click the `API Key Name`. The API Key properties display on a new page.\n\n3. In the `Key restrictions` section go to `API restrictions`.\n\n4. Click the `Select API` drop-down to choose an API.\n\n5. Click `Save`.\n\n6. Repeat steps 2,3,4,5 for every unrestricted API key\n\n**Note:** Do not set `API restrictions` to `Google Cloud APIs`, as this option allows access to all services offered by Google cloud.\n\n**From Google Cloud CLI**\n\n1. List all API keys.\n```\ngcloud services api-keys list\n```\n2. Note the `UID` of the key to add restrictions to.\n3. Run the update command with the appropriate flags to add the required restrictions.\n```\ngcloud alpha services api-keys update \n```\nNote- Flags can be found by running\n```\ngcloud alpha services api-keys update --help\n```\nor in this documentation\nhttps://cloud.google.com/sdk/gcloud/reference/alpha/services/api-keys/update", + "AuditProcedure": "**From Console:**\n\n1. Go to `APIs & Services\\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n2. In the section `API Keys`, Click the `API Key Name`. The API Key properties display on a new page.\n\n3. 
For every API Key, ensure the section `Key restrictions` parameter `API restrictions` is not set to `None`.\n\nOr,\n\nEnsure `API restrictions` is not set to `Google Cloud APIs`.\n\n**Note:** `Google Cloud APIs` represents the API collection of all cloud services/APIs offered by Google cloud.\n\n**From Google Cloud CLI**\n\n1. List all API Keys.\n```\ngcloud services api-keys list\n```\nEach key should have a line that says `restrictions:` followed by varying parameters and NOT have a line saying `- service: cloudapis.googleapis.com` as shown here:\n```\n restrictions:\n apiTargets:\n - service: cloudapis.googleapis.com\n\n```", + "AdditionalInformation": "Some of the gcloud commands listed are currently in alpha and might change without notice.", + "References": "https://cloud.google.com/docs/authentication/api-keys:https://cloud.google.com/apis/docs/overview" + } + ] + }, + { + "Id": "1.15", + "Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. If they are in use it is recommended to rotate API keys every 90 days.", + "Checks": [ + "apikeys_key_rotated_in_90_days" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. If they are in use it is recommended to rotate API keys every 90 days.", + "RationaleStatement": "Security risks involved in using API-Keys are listed below:\n\n- API keys are simple encrypted strings\n\n- API keys do not identify the user or the application making the API request\n\n- API keys are typically accessible to clients, making it easy to discover and steal an API key\n\nBecause of these potential risks, Google recommends using the standard authentication flow instead of API Keys. However, there are limited cases where API keys are more appropriate. For example, if there is a mobile application that needs to use the Google Cloud Translation API, but doesn't otherwise need a backend server, API keys are the simplest way to authenticate to that API.\n\nOnce a key is stolen, it has no expiration, meaning it may be used indefinitely unless the project owner revokes or regenerates the key. \nRotating API keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used. \n\nAPI keys should be rotated to ensure that data cannot be accessed with an old key that might have been lost, cracked, or stolen.", + "ImpactStatement": "`Regenerating Key` may break existing client connectivity as the client will try to connect with older API keys they have stored on devices.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `APIs & Services\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n2. In the section `API Keys`, click the `API Key Name`. The API Key properties display on a new page.\n\n3. Click `REGENERATE KEY` to rotate the API key.\n\n4. Click `Save`.\n\n5. Repeat steps 2,3,4 for every API key that has not been rotated in the last 90 days.\n\n**Note:** Do not set `HTTP referrers` to wild-cards (* or *.TLD or *.TLD/*) allowing access to any/wide HTTP referrer(s).\nDo not set `IP addresses` and referrer to `any host (0.0.0.0 or 0.0.0.0/0 or ::0)`.\n\n**From Google Cloud CLI**\n\nThere is not currently a way to regenerate an API key using gcloud commands.
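The manual flow described next can be approximated with a short script. This is a minimal sketch only, assuming the `gcloud alpha services api-keys` surface (which may change without notice); the UID variable is a hypothetical placeholder:

```
#!/usr/bin/env bash
# Hedged sketch: rotate an API key by creating a replacement, re-applying the
# old key's restrictions, and deleting the old key.
OLD_KEY_UID="REPLACE_WITH_UID"   # hypothetical; take the UID from the list command

gcloud services api-keys list                # note the UID of the key to rotate

gcloud alpha services api-keys create \
  --display-name="rotated-$(date +%F)"       # create the replacement key

# Re-apply the old key's restrictions to the new key, for example with
# `gcloud alpha services api-keys update ... --api-target=service=SERVICE`
# (the exact flags vary per key; see the update reference cited below).

gcloud alpha services api-keys delete "$OLD_KEY_UID"   # retire the old key
```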
To 'regenerate' a key you will need to create a new one, duplicate the restrictions from the key being rotated, and delete the old key.\n\n1. List existing keys.\n```\ngcloud services api-keys list\n```\n2. Note the `UID` and restrictions of the key to regenerate.\n\n3. Run this command to create a new API key, supplying the display name of the new key.\n```\ngcloud alpha services api-keys create --display-name=\"\"\n```\nNote the `UID` of the newly created key.\n\n4. Run the update command to add required restrictions.\n\nNote: the restrictions may vary for each key. Refer to this documentation for the appropriate flags.\nhttps://cloud.google.com/sdk/gcloud/reference/alpha/services/api-keys/update\n```\ngcloud alpha services api-keys update \n```\n5. Delete the old key.\n```\ngcloud alpha services api-keys delete \n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `APIs & Services\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n2. In the section `API Keys`, for every key ensure the `creation date` is within the last 90 days.\n\n**From Google Cloud CLI**\n\nTo list keys, use the command\n\n```\ngcloud services api-keys list\n```\nEnsure the date in `createTime` is within 90 days.", + "AdditionalInformation": "There is no option to automatically regenerate (rotate) API keys periodically.", + "References": "https://developers.google.com/maps/api-security-best-practices#regenerate-apikey:https://cloud.google.com/sdk/gcloud/reference/alpha/services/api-keys" + } + ] + }, + { + "Id": "1.12", + "Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. Unused keys with their permissions intact may still exist within a project. Keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to use standard authentication flow instead.", + "Checks": [ + "apikeys_key_exists" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. Unused keys with their permissions intact may still exist within a project. Keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to use standard authentication flow instead.", + "RationaleStatement": "To avoid the security risks of using API keys, it is recommended to use the standard authentication flow instead. Security risks involved in using API-Keys appear below:\n\n- API keys are simple encrypted strings\n\n- API keys do not identify the user or the application making the API request\n\n- API keys are typically accessible to clients, making it easy to discover and steal an API key", + "ImpactStatement": "Deleting an API key will break dependent applications (if any).", + "RemediationProcedure": "**From Console:**\n\n1. Go to `APIs & Services\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n1. In the section `API Keys`, to delete API Keys: click the `Delete Bin Icon` in front of every `API Key Name`.\n\n**From Google Cloud Command Line**\n\n1. Run the following from within the project you wish to audit **`gcloud services api-keys list --filter`**\n\n1. **Pipe the results into**\n``gcloud alpha services api-keys delete``", + "AuditProcedure": "**From Console:**\n\n1. 
From within the Project you wish to audit Go to `APIs & Services\\Credentials`. \n\n1. In the section `API Keys`, no API key should be listed.\n\n**From Google Cloud Command Line**\n\n1. Run the following from within the project you wish to audit **`gcloud services api-keys list --filter`**.\n\n1. There should be no keys listed at the project level.", + "AdditionalInformation": "Google recommends using the standard authentication flow instead of using API keys. However, there are limited cases where API keys are more appropriate. For example, if there is a mobile application that needs to use the Google Cloud Translation API, but doesn't otherwise need a backend server, API keys are the simplest way to authenticate to that API.\n\nIf a business requires API keys to be used, then the API keys should be secured properly.", + "References": "https://cloud.google.com/docs/authentication/api-keys:https://cloud.google.com/sdk/gcloud/reference/services/api-keys/list:https://cloud.google.com/docs/authentication:https://cloud.google.com/sdk/gcloud/reference/alpha/services/api-keys/delete" + } + ] + }, + { + "Id": "1.16", + "Description": "It is recommended that Essential Contacts is configured to designate email addresses for Google Cloud services to notify of important technical or security information.", + "Checks": [ + "iam_organization_essential_contacts_configured" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended that Essential Contacts is configured to designate email addresses for Google Cloud services to notify of important technical or security information.", + "RationaleStatement": "Many Google Cloud services, such as Cloud Billing, send out notifications to share important information with Google Cloud users. By default, these notifications are sent to members with certain Identity and Access Management (IAM) roles. With Essential Contacts, you can customize who receives notifications by providing your own list of contacts.", + "ImpactStatement": "There is no charge for Essential Contacts.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `Essential Contacts` by visiting https://console.cloud.google.com/iam-admin/essential-contacts\n2. Make sure the organization appears in the resource selector at the top of the page. The resource selector tells you what project, folder, or organization you are currently managing contacts for.\n3. Click `+Add contact`\n4. In the `Email` and `Confirm Email` fields, enter the email address of the contact.\n5. From the `Notification categories` drop-down menu, select the notification categories that you want the contact to receive communications for.\n6. Click `Save`\n\n**From Google Cloud CLI**\n\n1. To add an organization Essential Contacts run a command:\n```\ngcloud essential-contacts create --email=\"\" \\\n --notification-categories=\"\" \\\n --organization=\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `Essential Contacts` by visiting https://console.cloud.google.com/iam-admin/essential-contacts\n2. Make sure the organization appears in the resource selector at the top of the page. The resource selector tells you what project, folder, or organization you are currently managing contacts for.\n3. 
Ensure that appropriate email addresses are configured for each of the following notification categories:\n- `Legal`\n- `Security`\n- `Suspension`\n- `Technical`\n- `Technical Incidents`\n\nAlternatively, appropriate email addresses can be configured for the `All` notification category to receive all possible important notifications.\n\n**From Google Cloud CLI**\n\n1. To list all configured organization Essential Contacts run a command:\n```\ngcloud essential-contacts list --organization=\n``` \n2. Ensure at least one appropriate email address is configured for each of the following notification categories:\n- `LEGAL`\n- `SECURITY`\n- `SUSPENSION`\n- `TECHNICAL`\n- `TECHNICAL_INCIDENTS`\n\nAlternatively, appropriate email addresses can be configured for the `ALL` notification category to receive all possible important notifications.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/resource-manager/docs/managing-notification-contacts" + } + ] + }, + { + "Id": "1.10", + "Description": "Google Cloud Key Management Service stores cryptographic keys in a hierarchical structure designed for useful and elegant access control management. \n\nThe format for the rotation schedule depends on the client library that is used. For the gcloud command-line tool, the next rotation time must be in `ISO` or `RFC3339` format, and the rotation period must be in the form `INTEGERUNIT`, where units can be one of seconds (s), minutes (m), hours (h) or days (d).", + "Checks": [ + "kms_key_rotation_enabled" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Google Cloud Key Management Service stores cryptographic keys in a hierarchical structure designed for useful and elegant access control management. \n\nThe format for the rotation schedule depends on the client library that is used. For the gcloud command-line tool, the next rotation time must be in `ISO` or `RFC3339` format, and the rotation period must be in the form `INTEGERUNIT`, where units can be one of seconds (s), minutes (m), hours (h) or days (d).", + "RationaleStatement": "Set a key rotation period and starting time. A key can be created with a specified `rotation period`, which is the time between when new key versions are generated automatically. A key can also be created with a specified next rotation time. A key is a named object representing a `cryptographic key` used for a specific purpose. The key material, the actual bits used for `encryption`, can change over time as new key versions are created.\n\nA key is used to protect some `corpus of data`. A collection of files could be encrypted with the same key and people with `decrypt` permissions on that key would be able to decrypt those files. Therefore, it's necessary to make sure the `rotation period` is set to a specific time.", + "ImpactStatement": "After a successful key rotation, the older key version is required in order to decrypt the data encrypted by that previous key version.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `Cryptographic Keys` by visiting: https://console.cloud.google.com/security/kms(https://console.cloud.google.com/security/kms).\n2. Click on the specific key ring\n3. From the list of keys, choose the specific key and Click on `Right side pop up the blade (3 dots)`.\n4. Click on `Edit rotation period`.\n5. 
On the pop-up window, `Select a new rotation period` in days, which should be less than 90, and then choose the `Starting on` date (the date from which the rotation period begins).\n\n**From Google Cloud CLI**\n\n1. Update and schedule rotation by `ROTATION_PERIOD` and `NEXT_ROTATION_TIME` for each key:\n\n```\ngcloud kms keys update new --keyring=KEY_RING --location=LOCATION --next-rotation-time=NEXT_ROTATION_TIME --rotation-period=ROTATION_PERIOD\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `Cryptographic Keys` by visiting: https://console.cloud.google.com/security/kms.\n2. Click on each key ring, then ensure each key in the keyring has `Next Rotation` set for less than 90 days from the current date.\n\n**From Google Cloud CLI**\n\n1. Ensure rotation is scheduled by `ROTATION_PERIOD` and `NEXT_ROTATION_TIME` for each key:\n\n```\ngcloud kms keys list --keyring= --location= --format='json(rotationPeriod, nextRotationTime)'\n```\n\nEnsure outcome values for `rotationPeriod` and `nextRotationTime` satisfy the below criteria:\n\n`rotationPeriod is <= 129600m` \n`rotationPeriod is <= 7776000s` \n`rotationPeriod is <= 2160h` \n`rotationPeriod is <= 90d` \n`nextRotationTime is <= 90 days` from the current date", + "AdditionalInformation": "- Key rotation does NOT re-encrypt already encrypted data with the newly generated key version. If you suspect unauthorized use of a key, you should re-encrypt the data protected by that key and then disable or schedule destruction of the prior key version.\n- It is not recommended to rely solely on irregular rotation, but rather to use irregular rotation if needed in conjunction with a regular rotation schedule.", + "References": "https://cloud.google.com/kms/docs/key-rotation#frequency_of_key_rotation:https://cloud.google.com/kms/docs/re-encrypt-data" + } + ] + }, + { + "Id": "1.9", + "Description": "It is recommended that the IAM policy on Cloud KMS `cryptokeys` should restrict anonymous and/or public access.", + "Checks": [ + "kms_key_not_publicly_accessible" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended that the IAM policy on Cloud KMS `cryptokeys` should restrict anonymous and/or public access.", + "RationaleStatement": "Granting permissions to `allUsers` or `allAuthenticatedUsers` allows anyone to access the dataset. Such access might not be desirable if sensitive data is stored at the location. In this case, ensure that anonymous and/or public access to a Cloud KMS `cryptokey` is not allowed.", + "ImpactStatement": "Removing the binding for `allUsers` and `allAuthenticatedUsers` members denies accessing `cryptokeys` to anonymous or public users.", + "RemediationProcedure": "**From Google Cloud CLI**\n\n1. List all Cloud KMS `Cryptokeys`.\n\n```\ngcloud kms keys list --keyring=key_ring_name --location=global --format=json | jq '.[].name'\n```\n2. Remove the IAM policy binding for a KMS key to remove access to `allUsers` and `allAuthenticatedUsers` using the below commands.\n\n```\ngcloud kms keys remove-iam-policy-binding key_name --keyring=key_ring_name --location=global --member='allAuthenticatedUsers' --role='role'\n\ngcloud kms keys remove-iam-policy-binding key_name --keyring=key_ring_name --location=global --member='allUsers' --role='role'\n```", + "AuditProcedure": "**From Google Cloud CLI**\n\n1. 
List all Cloud KMS `Cryptokeys`.\n```\ngcloud kms keys list --keyring=key_ring_name --location=global --format=json | jq '.[].name'\n```\n2. Ensure the below command's output does not contain `allUsers` or `allAuthenticatedUsers`.\n```\ngcloud kms keys get-iam-policy key_name --keyring=key_ring_name --location=global --format=json | jq '.bindings[].members'\n```", + "AdditionalInformation": "key_ring_name: the resource ID of the key ring, which is the fully-qualified key ring name. This value is case-sensitive and in the form: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING\n\nYou can retrieve the key ring resource ID using the Cloud Console:\n\n1. Open the `Cryptographic Keys` page in the Cloud Console.\n2. For the key ring whose resource ID you are retrieving, click the `More icon (3 vertical dots)`.\n3. Click `Copy Resource ID`. The resource ID for the key ring is copied to your clipboard.\n\nkey_name: the resource ID of the key, which is the fully-qualified CryptoKey name. This value is case-sensitive and in the form: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY\n\nYou can retrieve the key resource ID using the Cloud Console:\n1. Open the `Cryptographic Keys` page in the Cloud Console.\n2. Click the name of the key ring that contains the key.\n3. For the key whose resource ID you are retrieving, click the `More icon (3 vertical dots)`.\n4. Click `Copy Resource ID`. The resource ID for the key is copied to your clipboard.\n\nrole: the role to remove the member from.", + "References": "https://cloud.google.com/sdk/gcloud/reference/kms/keys/remove-iam-policy-binding:https://cloud.google.com/sdk/gcloud/reference/kms/keys/set-iam-policy:https://cloud.google.com/sdk/gcloud/reference/kms/keys/get-iam-policy:https://cloud.google.com/kms/docs/object-hierarchy#key_resource_id" + } + ] + }, + { + "Id": "1.17", + "Description": "When you use Dataproc, cluster and job data is stored on Persistent Disks (PDs) associated with the Compute Engine VMs in your cluster and in a Cloud Storage staging bucket. This PD and bucket data is encrypted using a Google-generated data encryption key (DEK) and key encryption key (KEK). The CMEK feature allows you to create, use, and revoke the key encryption key (KEK). Google still controls the data encryption key (DEK).", + "Checks": [ + "dataproc_encrypted_with_cmks_disabled" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "When you use Dataproc, cluster and job data is stored on Persistent Disks (PDs) associated with the Compute Engine VMs in your cluster and in a Cloud Storage staging bucket. This PD and bucket data is encrypted using a Google-generated data encryption key (DEK) and key encryption key (KEK). The CMEK feature allows you to create, use, and revoke the key encryption key (KEK). Google still controls the data encryption key (DEK).", + "RationaleStatement": "Cloud services offer the ability to protect data related to those services using encryption keys managed by the customer within Cloud KMS. These encryption keys are called customer-managed encryption keys (CMEK). When you protect data in Google Cloud services with CMEK, the CMEK key is within your control.", + "ImpactStatement": "Using Customer Managed Keys involves additional overhead in maintenance by administrators.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. 
Login to the GCP Console and navigate to the Dataproc Cluster page by visiting https://console.cloud.google.com/dataproc/clusters.\n1. Select the project from the projects dropdown list.\n1. On the `Dataproc Cluster` page, click on the `Create Cluster` to create a new cluster with Customer managed encryption keys.\n1. On `Create a cluster` page, perform below steps:\n - Inside `Set up cluster` section perform below steps:\n -In the `Name` textbox, provide a name for your cluster.\n - From `Location` select the location in which you want to deploy a cluster.\n - Configure other configurations as per your requirements.\n - Inside `Configure Nodes` and `Customize cluster` section configure the settings as per your requirements.\n - Inside `Manage security` section, perform below steps:\n - From `Encryption`, select `Customer-managed key`.\n - Select a customer-managed key from dropdown list.\n - Ensure that the selected KMS Key have Cloud KMS CryptoKey Encrypter/Decrypter role assign to Dataproc Cluster service account (\"serviceAccount:service-@compute-system.iam.gserviceaccount.com\").\n - Click on `Create` to create a cluster.\n - Once the cluster is created migrate all your workloads from the older cluster to the new cluster and delete the old cluster by performing the below steps:\n - On the `Clusters` page, select the old cluster and click on `Delete cluster`.\n - On the `Confirm deletion` window, click on `Confirm` to delete the cluster.\n - Repeat step above for other Dataproc clusters available in the selected project.\n - Change the project from the project dropdown list and repeat the remediation procedure for other Dataproc clusters available in other projects.\n\n**From Google Cloud CLI**\n\nBefore creating cluster ensure that the selected KMS Key have Cloud KMS CryptoKey Encrypter/Decrypter role assign to Dataproc Cluster service account (\"serviceAccount:service-@compute-system.iam.gserviceaccount.com\").\nRun clusters create command to create new cluster with customer-managed key:\n```\ngcloud dataproc clusters create --region=us-central1 --gce-pd-kms-key=\n```\nThe above command will create a new cluster in the selected region.\n\nOnce the cluster is created migrate all your workloads from the older cluster to the new cluster and Run clusters delete command to delete cluster:\n```\ngcloud dataproc clusters delete --region=us-central1\n```\nRepeat step no. 1 to create a new Dataproc cluster.\nChange the project by running the below command and repeat the remediation procedure for other projects:\n```\ngcloud config set project \"\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Login to the GCP Console and navigate to the Dataproc Cluster page by visiting https://console.cloud.google.com/dataproc/clusters.\n1. Select the project from the project dropdown list.\n1. On the `Dataproc Clusters` page, select the cluster and click on the Name attribute value that you want to examine.\n1. On the `details` page, select the `Configurations` tab.\n1. On the `Configurations` tab, check the `Encryption type` configuration attribute value. If the value is set to `Google-managed key`, then Dataproc Cluster is not encrypted with Customer managed encryption keys.\n\nRepeat step no. 3 - 5 for other Dataproc Clusters available in the selected project.\n\n6. Change the project from the project dropdown list and repeat the audit procedure for other projects.\n\n**From Google Cloud CLI**\n\n1. 
Run clusters list command to list all the Dataproc Clusters available in the region:\n```\ngcloud dataproc clusters list --region='us-central1'\n```\n2. Run clusters describe command to get the key details of the selected cluster:\n```\ngcloud dataproc clusters describe --region=us-central1 --flatten=config.encryptionConfig.gcePdKmsKeyName\n```\n3. If the above command output return \"null\", then the selected cluster is not encrypted with Customer managed encryption keys.\n4. Repeat step no. 2 and 3 for other Dataproc Clusters available in the selected region. Change the region by updating --region and repeat step no. 2 for other clusters available in the project. Change the project by running the below command and repeat the audit procedure for other Dataproc clusters available in other projects:\n```\ngcloud config set project \"\n```", + "AdditionalInformation": "", + "References": "https://cloud.google.com/docs/security/encryption/default-encryption" + } + ] + }, + { + "Id": "1.6", + "Description": "It is recommended to assign the `Service Account User (iam.serviceAccountUser)` and `Service Account Token Creator (iam.serviceAccountTokenCreator)` roles to a user for a specific service account rather than assigning the role to a user at project level.", + "Checks": [ + "iam_no_service_roles_at_project_level" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to assign the `Service Account User (iam.serviceAccountUser)` and `Service Account Token Creator (iam.serviceAccountTokenCreator)` roles to a user for a specific service account rather than assigning the role to a user at project level.", + "RationaleStatement": "A service account is a special Google account that belongs to an application or a virtual machine (VM), instead of to an individual end-user. Application/VM-Instance uses the service account to call the service's Google API so that users aren't directly involved.\nIn addition to being an identity, a service account is a resource that has IAM policies attached to it. These policies determine who can use the service account.\n\nUsers with IAM roles to update the App Engine and Compute Engine instances (such as App Engine Deployer or Compute Instance Admin) can effectively run code as the service accounts used to run these instances, and indirectly gain access to all the resources for which the service accounts have access. Similarly, SSH access to a Compute Engine instance may also provide the ability to execute code as that instance/Service account.\n\nBased on business needs, there could be multiple user-managed service accounts configured for a project. Granting the `iam.serviceAccountUser` or `iam.serviceAccountTokenCreator` roles to a user for a project gives the user access to all service accounts in the project, including service accounts that may be created in the future. This can result in elevation of privileges by using service accounts and corresponding `Compute Engine instances`.\n\nIn order to implement `least privileges` best practices, IAM users should not be assigned the `Service Account User` or `Service Account Token Creator` roles at the project level. Instead, these roles should be assigned to a user for a specific service account, giving that user access to the service account. 
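For illustration, a binding scoped to a single service account (all names here are hypothetical) would look like the following, rather than a project-level grant:

```
# Hypothetical example: grant Service Account User on ONE service account,
# not on the whole project.
gcloud iam service-accounts add-iam-policy-binding \
    my-sa@my-project.iam.gserviceaccount.com \
    --member="user:jane@example.com" \
    --role="roles/iam.serviceAccountUser"
```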
The `Service Account User` allows a user to bind a service account to a long-running job service, whereas the `Service Account Token Creator` role allows a user to directly impersonate (or assert) the identity of a service account.", + "ImpactStatement": "After revoking `Service Account User` or `Service Account Token Creator` roles at the project level from all impacted user account(s), these roles should be assigned to a user(s) for specific service account(s) according to business needs.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the IAM page in the GCP Console by visiting: https://console.cloud.google.com/iam-admin/iam(https://console.cloud.google.com/iam-admin/iam).\n\n2. Click on the filter table text bar. Type `Role: Service Account User`\n\n3. Click the `Delete Bin` icon in front of the role `Service Account User` for every user listed as a result of a filter.\n\n4. Click on the filter table text bar. Type `Role: Service Account Token Creator`\n\n5. Click the `Delete Bin` icon in front of the role `Service Account Token Creator` for every user listed as a result of a filter.\n\n**From Google Cloud CLI**\n\n1. Using a text editor, remove the bindings with the `roles/iam.serviceAccountUser` or `roles/iam.serviceAccountTokenCreator`. \n\nFor example, you can use the iam.json file shown below as follows:\n\n {\n \"bindings\": \n {\n \"members\": \n \"serviceAccount:our-project-123@appspot.gserviceaccount.com\",\n ,\n \"role\": \"roles/appengine.appViewer\"\n },\n {\n \"members\": \n \"user:email1@gmail.com\"\n ,\n \"role\": \"roles/owner\"\n },\n {\n \"members\": \n \"serviceAccount:our-project-123@appspot.gserviceaccount.com\",\n \"serviceAccount:123456789012-compute@developer.gserviceaccount.com\"\n ,\n \"role\": \"roles/editor\"\n }\n ,\n \"etag\": \"BwUjMhCsNvY=\"\n }\n\n2. Update the project's IAM policy:\n\n```\ngcloud projects set-iam-policy PROJECT_ID iam.json\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the IAM page in the GCP Console by visiting https://console.cloud.google.com/iam-admin/iam(https://console.cloud.google.com/iam-admin/iam)\n\n2. Click on the filter table text bar, Type `Role: Service Account User`.\n\n3. Ensure no user is listed as a result of the filter.\n\n4. Click on the filter table text bar, Type `Role: Service Account Token Creator`.\n\n3. Ensure no user is listed as a result of the filter.\n\n**From Google Cloud CLI**\n\nTo ensure IAM users are not assigned Service Account User role at the project level:\n\n```\ngcloud projects get-iam-policy PROJECT_ID --format json | jq '.bindings.role' | grep \"roles/iam.serviceAccountUser\"\n\ngcloud projects get-iam-policy PROJECT_ID --format json | jq '.bindings.role' | grep \"roles/iam.serviceAccountTokenCreator\"\n```\n\nThese commands should not return any output.", + "AdditionalInformation": "To assign the role `roles/iam.serviceAccountUser` or `roles/iam.serviceAccountTokenCreator` to a user role on a service account instead of a project:\n\n1. Go to https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts(https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts)\n\n2. Select ` Target Project`\n\n3. Select `target service account`. Click `Permissions` on the top bar. It will open permission pane on right side of the page\n\n4. 
Add desired members with `Service Account User` or `Service Account Token Creator` role.", + "References": "https://cloud.google.com/iam/docs/service-accounts:https://cloud.google.com/iam/docs/granting-roles-to-service-accounts:https://cloud.google.com/iam/docs/understanding-roles:https://cloud.google.com/iam/docs/granting-changing-revoking-access:https://console.cloud.google.com/iam-admin/iam" + } + ] + }, + { + "Id": "1.11", + "Description": "It is recommended that the principle of 'Separation of Duties' is enforced while assigning KMS related roles to users.", + "Checks": [ + "iam_role_kms_enforce_separation_of_duties" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "It is recommended that the principle of 'Separation of Duties' is enforced while assigning KMS related roles to users.", + "RationaleStatement": "The built-in/predefined IAM role `Cloud KMS Admin` allows the user/identity to create, delete, and manage service account(s).\nThe built-in/predefined IAM role `Cloud KMS CryptoKey Encrypter/Decrypter` allows the user/identity (with adequate privileges on concerned resources) to encrypt and decrypt data at rest using an encryption key(s).\n\nThe built-in/predefined IAM role `Cloud KMS CryptoKey Encrypter` allows the user/identity (with adequate privileges on concerned resources) to encrypt data at rest using an encryption key(s).\nThe built-in/predefined IAM role `Cloud KMS CryptoKey Decrypter` allows the user/identity (with adequate privileges on concerned resources) to decrypt data at rest using an encryption key(s).\n\nSeparation of duties is the concept of ensuring that one individual does not have all necessary permissions to be able to complete a malicious action. In Cloud KMS, this could be an action such as using a key to access and decrypt data a user should not normally have access to. Separation of duties is a business control typically used in larger organizations, meant to help avoid security or privacy incidents and errors. It is considered best practice.\n\nNo user(s) should have `Cloud KMS Admin` and any of the `Cloud KMS CryptoKey Encrypter/Decrypter`, `Cloud KMS CryptoKey Encrypter`, `Cloud KMS CryptoKey Decrypter` roles assigned at the same time.", + "ImpactStatement": "Removed roles should be assigned to another user based on business needs.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `IAM & Admin/IAM` using `https://console.cloud.google.com/iam-admin/iam`\n\n2. For any member having `Cloud KMS Admin` and any of the `Cloud KMS CryptoKey Encrypter/Decrypter`, `Cloud KMS CryptoKey Encrypter`, `Cloud KMS CryptoKey Decrypter` roles granted/assigned, click the `Delete Bin` icon to remove the role from the member.\n\nNote: Removing a role should be done based on the business requirement.", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `IAM & Admin/IAM` by visiting: https://console.cloud.google.com/iam-admin/iam(https://console.cloud.google.com/iam-admin/iam)\n\n2. Ensure no member has the roles `Cloud KMS Admin` and any of the `Cloud KMS CryptoKey Encrypter/Decrypter`, `Cloud KMS CryptoKey Encrypter`, `Cloud KMS CryptoKey Decrypter` assigned.\n\n**From Google Cloud CLI**\n\n1. List all users and role assignments:\n\n```\ngcloud projects get-iam-policy PROJECT_ID\n```\n\n2. 
Ensure that there are no common users found in the member section for roles `cloudkms.admin` and any one of `Cloud KMS CryptoKey Encrypter/Decrypter`, `Cloud KMS CryptoKey Encrypter`, `Cloud KMS CryptoKey Decrypter`", + "AdditionalInformation": "Users granted with Owner (roles/owner) and Editor (roles/editor) have privileges equivalent to `Cloud KMS Admin` and `Cloud KMS CryptoKey Encrypter/Decrypter`. To avoid misuse, Owner and Editor roles should be granted to a very limited group of users. Use of these primitive privileges should be minimal. These requirements are addressed in separate recommendations.", + "References": "https://cloud.google.com/kms/docs/separation-of-duties" + } + ] + }, + { + "Id": "1.13", + "Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. In this case, unrestricted keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to restrict API key usage to trusted hosts, HTTP referrers and apps. It is recommended to use the more secure standard authentication flow instead.", + "Checks": [], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. In this case, unrestricted keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to restrict API key usage to trusted hosts, HTTP referrers and apps. It is recommended to use the more secure standard authentication flow instead.", + "RationaleStatement": "Security risks involved in using API-Keys appear below:\n\n- API keys are simple encrypted strings\n\n- API keys do not identify the user or the application making the API request\n\n- API keys are typically accessible to clients, making it easy to discover and steal an API key\n\nIn light of these potential risks, Google recommends using the standard authentication flow instead of API keys. However, there are limited cases where API keys are more appropriate. For example, if there is a mobile application that needs to use the Google Cloud Translation API, but doesn't otherwise need a backend server, API keys are the simplest way to authenticate to that API.\n\nIn order to reduce attack vectors, API-Keys can be restricted only to trusted hosts, HTTP referrers and applications.", + "ImpactStatement": "Setting `Application Restrictions` may break existing application functioning, if not done carefully.", + "RemediationProcedure": "**From Google Cloud Console**\n\n***Leaving Keys in Place***\n\n1. Go to `APIs & Services\\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n2. In the section `API Keys`, Click the `API Key Name`. The API Key properties display on a new page.\n\n3. In the `Key restrictions` section, set the application restrictions to any of `HTTP referrers, IP addresses, Android apps, iOS apps`.\n\n4. Click `Save`.\n\n1. Repeat steps 2,3,4 for every unrestricted API key.\n**Note:** Do not set `HTTP referrers` to wild-cards (* or *.TLD or *.TLD/*) allowing access to any/wide HTTP referrer(s)\nDo not set `IP addresses` and referrer to `any host (0.0.0.0 or 0.0.0.0/0 or ::0)`\n\n***Removing Keys***\n\nAnother option is to remove the keys entirely.\n\n1. 
Go to `APIs & Services\\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n2. In the section `API Keys`, select the checkbox next to each key you wish to remove\n\n3. Select `Delete` and confirm.", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `APIs & Services\\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n1. In the section `API Keys`, Click the `API Key Name`. The API Key properties display on a new page.\n\n1. For every API Key, ensure the section `Key restrictions` parameter `Application restrictions` is not set to `None`.\n\nOr,\n\n1. Ensure `Application restrictions` is set to `HTTP referrers` and the referrer is not set to wild-cards `(* or *.TLD or *.TLD/*) allowing access to any/wide HTTP referrer(s)`\n\nOr,\n\n1. Ensure `Application restrictions` is set to `IP addresses` and referrer is not set to `any host (0.0.0.0 or 0.0.0.0/0 or ::0)`\n\n**From Google Cloud Command Line**\n\n1. Run the following from within the project you wish to audit \n```\ngcloud services api-keys list --filter=\"-restrictions:*\" --format=\"tablebox(displayName:label='Key With No Restrictions')\n```", + "AdditionalInformation": "", + "References": "https://cloud.google.com/docs/authentication/api-keys:https://cloud.google.com/sdk/gcloud/reference/services/api-keys/list:https://cloud.google.com/sdk/gcloud/reference/alpha/services/api-keys/update" + } + ] + }, + { + "Id": "1.8", + "Description": "It is recommended that the principle of 'Separation of Duties' is enforced while assigning service-account related roles to users.", + "Checks": [ + "iam_role_sa_enforce_separation_of_duties" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "It is recommended that the principle of 'Separation of Duties' is enforced while assigning service-account related roles to users.", + "RationaleStatement": "The built-in/predefined IAM role `Service Account admin` allows the user/identity to create, delete, and manage service account(s).\nThe built-in/predefined IAM role `Service Account User` allows the user/identity (with adequate privileges on Compute and App Engine) to assign service account(s) to Apps/Compute Instances.\n\nSeparation of duties is the concept of ensuring that one individual does not have all necessary permissions to be able to complete a malicious action. In Cloud IAM - service accounts, this could be an action such as using a service account to access resources that user should not normally have access to.\n\nSeparation of duties is a business control typically used in larger organizations, meant to help avoid security or privacy incidents and errors. It is considered best practice.\n\nNo user should have `Service Account Admin` and `Service Account User` roles assigned at the same time.", + "ImpactStatement": "The removed role should be assigned to a different user based on business needs.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `IAM & Admin/IAM` using `https://console.cloud.google.com/iam-admin/iam`.\n\n2. For any member having both `Service Account Admin` and `Service account User` roles granted/assigned, click the `Delete Bin` icon to remove either role from the member.\nRemoval of a role should be done based on the business requirements.", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `IAM & Admin/IAM` using `https://console.cloud.google.com/iam-admin/iam`.\n\n2. 
Ensure no member has the roles `Service Account Admin` and `Service account User` assigned together.\n\n**From Google Cloud CLI**\n\n1. List all users and role assignments:\n\n```\ngcloud projects get-iam-policy Project_ID --format json | \\\n jq -r '\n (\"Service_Account_Admin_and_User\" | (., map(length*\"-\"))), \n (\n \n .bindings | \n select(.role == \"roles/iam.serviceAccountAdmin\" or .role == \"roles/iam.serviceAccountUser\").members\n | \n group_by(.) | \n map({User: ., Count: length}) | \n . | \n select(.Count == 2).User | \n unique\n )\n | \n . | \n @tsv'\n```\n\n2. All common users listed under `Service_Account_Admin_and_User` are assigned both the `roles/iam.serviceAccountAdmin` and `roles/iam.serviceAccountUser` roles.", + "AdditionalInformation": "Users granted with Owner (roles/owner) and Editor (roles/editor) have privileges equivalent to `Service Account Admin` and `Service Account User`. To avoid the misuse, Owner and Editor roles should be granted to very limited users and Use of these primitive privileges should be minimal. These requirements are addressed in separate recommendations.", + "References": "https://cloud.google.com/iam/docs/service-accounts:https://cloud.google.com/iam/docs/understanding-roles:https://cloud.google.com/iam/docs/granting-roles-to-service-accounts" + } + ] + }, + { + "Id": "1.5", + "Description": "A service account is a special Google account that belongs to an application or a VM, instead of to an individual end-user. The application uses the service account to call the service's Google API so that users aren't directly involved. It's recommended not to use admin access for ServiceAccount.", + "Checks": [ + "iam_sa_no_administrative_privileges" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "A service account is a special Google account that belongs to an application or a VM, instead of to an individual end-user. The application uses the service account to call the service's Google API so that users aren't directly involved. It's recommended not to use admin access for ServiceAccount.", + "RationaleStatement": "Service accounts represent service-level security of the Resources (application or a VM) which can be determined by the roles assigned to it. Enrolling ServiceAccount with Admin rights gives full access to an assigned application or a VM. A ServiceAccount Access holder can perform critical actions like delete, update change settings, etc. without user intervention. For this reason, it's recommended that service accounts not have Admin rights.", + "ImpactStatement": "Removing `*Admin` or `*admin` or `Editor` or `Owner` role assignments from service accounts may break functionality that uses impacted service accounts. Required role(s) should be assigned to impacted service accounts in order to restore broken functionalities.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `IAM & admin/IAM` using `https://console.cloud.google.com/iam-admin/iam`\n2. Go to the `Members`\n3. Identify `User-Managed user created` service account with roles containing `*Admin` or `*admin` or role matching `Editor` or role matching `Owner`\n4. Click the `Delete bin` icon to remove the role from the member (service account in this case)\n\n**From Google Cloud CLI**\n\n```\ngcloud projects get-iam-policy PROJECT_ID --format json > iam.json\n```\n\n1. 
Using a text editor, Remove `Role` which contains `roles/*Admin` or `roles/*admin` or matched `roles/editor` or matches 'roles/owner`. Add a role to the bindings array that defines the group members and the role for those members. \n\nFor example, to grant the role roles/appengine.appViewer to the `ServiceAccount` which is roles/editor, you would change the example shown below as follows:\n\n {\n \"bindings\": \n {\n \"members\": \n \"serviceAccount:our-project-123@appspot.gserviceaccount.com\",\n ,\n \"role\": \"roles/appengine.appViewer\"\n },\n {\n \"members\": \n \"user:email1@gmail.com\"\n ,\n \"role\": \"roles/owner\"\n },\n {\n \"members\": \n \"serviceAccount:our-project-123@appspot.gserviceaccount.com\",\n \"serviceAccount:123456789012-compute@developer.gserviceaccount.com\"\n ,\n \"role\": \"roles/editor\"\n }\n ,\n \"etag\": \"BwUjMhCsNvY=\"\n }\n2. Update the project's IAM policy:\n\n```\ngcloud projects set-iam-policy PROJECT_ID iam.json\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `IAM & admin/IAM` using `https://console.cloud.google.com/iam-admin/iam`\n2. Go to the `Members`\n3. Ensure that there are no `User-Managed user created service account(s)` with roles containing `*Admin` or `*admin` or role matching `Editor` or role matching `Owner`\n\n**From Google Cloud CLI**\n\n1. Get the policy that you want to modify, and write it to a JSON file:\n\n```\ngcloud projects get-iam-policy PROJECT_ID --format json > iam.json\n```\n\n2. The contents of the JSON file will look similar to the following. Note that `role` of members group associated with each `serviceaccount` does not contain `*Admin` or `*admin` or does not match `roles/editor` or does not match `roles/owner`.\n\nThis recommendation is only applicable to `User-Managed user-created` service accounts. These accounts have the nomenclature: `SERVICE_ACCOUNT_NAME@PROJECT_ID.iam.gserviceaccount.com`. Note that some Google-managed, Google-created service accounts have the same naming format, and should be excluded (e.g., `appsdev-apps-dev-script-auth@system.gserviceaccount.com` which needs the Owner role).\n\n**Sample Json output:**\n\n {\n \"bindings\": \n {\n \"members\": \n \"serviceAccount:our-project-123@appspot.gserviceaccount.com\",\n ,\n \"role\": \"roles/appengine.appAdmin\"\n },\n {\n \"members\": \n \"user:email1@gmail.com\"\n ,\n \"role\": \"roles/owner\"\n },\n {\n \"members\": \n \"serviceAccount:our-project-123@appspot.gserviceaccount.com\",\n \"serviceAccount:123456789012-compute@developer.gserviceaccount.com\"\n ,\n \"role\": \"roles/editor\"\n }\n ,\n \"etag\": \"BwUjMhCsNvY=\",\n \"version\": 1\n }", + "AdditionalInformation": "Default (user-managed but not user-created) service accounts have the `Editor (roles/editor)` role assigned to them to support GCP services they offer. \nSuch Service accounts are: `PROJECT_NUMBER-compute@developer.gserviceaccount.com`, `PROJECT_ID@appspot.gserviceaccount.com`.", + "References": "https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/:https://cloud.google.com/iam/docs/understanding-roles:https://cloud.google.com/iam/docs/understanding-service-accounts" + } + ] + }, + { + "Id": "1.4", + "Description": "User managed service accounts should not have user-managed keys.", + "Checks": [ + "iam_sa_no_user_managed_keys" + ], + "Attributes": [ + { + "Section": "1. 
Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "User managed service accounts should not have user-managed keys.", + "RationaleStatement": "Anyone who has access to the keys will be able to access resources through the service account. \nGCP-managed keys are used by Cloud Platform services such as App Engine and Compute Engine. These keys cannot be downloaded. Google will keep the keys and automatically rotate them on an approximately weekly basis.\nUser-managed keys are created, downloadable, and managed by users. They expire 10 years from creation.\n\nFor user-managed keys, the user has to take ownership of key management activities which include:\n- Key storage\n- Key distribution\n- Key revocation\n- Key rotation\n- Protecting the keys from unauthorized users\n- Key recovery\n\nEven with key owner precautions, keys can be easily leaked by common development malpractices like checking keys into the source code or leaving them in the Downloads directory, or accidentally leaving them on support blogs/channels.\n\nIt is recommended to prevent user-managed service account keys.", + "ImpactStatement": "Deleting user-managed Service Account Keys may break communication with the applications using the corresponding keys.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the IAM page in the GCP Console using `https://console.cloud.google.com/iam-admin/iam`\n\n2. In the left navigation pane, click `Service accounts`. All service accounts and their corresponding keys are listed.\n\n3. Click the service account.\n\n4. Click the `edit` and delete the keys.\n\n**From Google Cloud CLI**\n\nTo delete a user managed Service Account Key,\n\n```\ngcloud iam service-accounts keys delete --iam-account= \n```\n\n**Prevention:**\nYou can disable service account key creation through the `Disable service account key creation` Organization policy by visiting https://console.cloud.google.com/iam-admin/orgpolicies/iam-disableServiceAccountKeyCreation(https://console.cloud.google.com/iam-admin/orgpolicies/iam-disableServiceAccountKeyCreation). Learn more at: https://cloud.google.com/resource-manager/docs/organization-policy/restricting-service-accounts(https://cloud.google.com/resource-manager/docs/organization-policy/restricting-service-accounts)\n\nIn addition, if you do not need to have service accounts in your project, you can also prevent the creation of service accounts through the `Disable service account creation` Organization policy: https://console.cloud.google.com/iam-admin/orgpolicies/iam-disableServiceAccountCreation(https://console.cloud.google.com/iam-admin/orgpolicies/iam-disableServiceAccountCreation).", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the IAM page in the GCP Console using `https://console.cloud.google.com/iam-admin/iam`\n\n2. In the left navigation pane, click `Service accounts`. All service accounts and their corresponding keys are listed.\n\n3. 
Click the service accounts and check if keys exist.\n\n**From Google Cloud CLI**\n\nList All the service accounts:\n\n```\ngcloud iam service-accounts list\n```\nIdentify user-managed service accounts as such account `EMAIL` ends with `iam.gserviceaccount.com`\n\nFor each user-managed service account, list the keys managed by the user:\n```\ngcloud iam service-accounts keys list --iam-account= --managed-by=user\n```\nNo keys should be listed.", + "AdditionalInformation": "A user-managed key cannot be created on GCP-Managed Service Accounts.", + "References": "https://cloud.google.com/iam/docs/understanding-service-accounts#managing_service_account_keys:https://cloud.google.com/resource-manager/docs/organization-policy/restricting-service-accounts" + } + ] + }, + { + "Id": "1.7", + "Description": "Service Account keys consist of a key ID (Private_key_Id) and Private key, which are used to sign programmatic requests users make to Google cloud services accessible to that particular service account. It is recommended that all Service Account keys are regularly rotated.", + "Checks": [ + "iam_sa_user_managed_key_rotate_90_days" + ], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Service Account keys consist of a key ID (Private_key_Id) and Private key, which are used to sign programmatic requests users make to Google cloud services accessible to that particular service account. It is recommended that all Service Account keys are regularly rotated.", + "RationaleStatement": "Rotating Service Account keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used. Service Account keys should be rotated to ensure that data cannot be accessed with an old key that might have been lost, cracked, or stolen.\n\nEach service account is associated with a key pair managed by Google Cloud Platform (GCP). It is used for service-to-service authentication within GCP. Google rotates the keys daily.\n\nGCP provides the option to create one or more user-managed (also called external key pairs) key pairs for use from outside GCP (for example, for use with Application Default Credentials). When a new key pair is created, the user is required to download the private key (which is not retained by Google). With external keys, users are responsible for keeping the private key secure and other management operations such as key rotation. External keys can be managed by the IAM API, gcloud command-line tool, or the Service Accounts page in the Google Cloud Platform Console. GCP facilitates up to 10 external service account keys per service account to facilitate key rotation.", + "ImpactStatement": "Rotating service account keys will break communication for dependent applications. Dependent applications need to be configured manually with the new key `ID` displayed in the `Service account keys` section and the `private key` downloaded by the user.", + "RemediationProcedure": "**From Google Cloud Console**\n\n**Delete any external (user-managed) Service Account Key older than 90 days:**\n\n1. Go to `APIs & Services\\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n2. 
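Editor's note (not CIS text): the per-account key listing in the 1.4 audit above can be scripted across the whole project. A minimal sketch, assuming only the commands already cited in that audit:

```bash
# Sketch: flag every service account in the current project that still
# carries user-managed keys (the 1.4 audit expects none to be listed).
for sa in $(gcloud iam service-accounts list --format="value(email)"); do
  keys=$(gcloud iam service-accounts keys list \
           --iam-account="$sa" --managed-by=user --format="value(name)")
  [ -n "$keys" ] && echo "User-managed key(s) found for: $sa"
done
```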
In the section `Service Account Keys`, for every external (user-managed) service account key whose `creation date` is 90 or more days in the past, click the `Delete Bin Icon` to `Delete Service Account key`\n\n**Create a new external (user-managed) Service Account Key for a Service Account:**\n\n1. Go to `APIs & Services\\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n2. Click `Create Credentials` and Select `Service Account Key`.\n\n3. Choose the service account in the drop-down list for which an External (user-managed) Service Account key needs to be created.\n\n4. Select the desired key type format among `JSON` or `P12`.\n\n5. Click `Create`. It will download the `private key`. Keep it safe. \n\n6. Click `Close` if prompted. \n\n7. The site will redirect to the `APIs & Services\\Credentials` page. Make a note of the new `ID` displayed in the `Service account keys` section.", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `APIs & Services\\Credentials` using `https://console.cloud.google.com/apis/credentials`\n\n2. In the section `Service Account Keys`, for every External (user-managed) service account key listed ensure the `creation date` is within the past 90 days.\n\n**From Google Cloud CLI**\n\n1. List all Service accounts from a project.\n\n```\ngcloud iam service-accounts list\n```\n\n2. For every service account list service account keys.\n\n```\ngcloud iam service-accounts keys list --iam-account Service_Account_Email_Id --format=json\n```\n\n3. Ensure every service account key for a service account has a `\"validAfterTime\"` value within the past 90 days.", + "AdditionalInformation": "For user-managed Service Account key(s), key management is entirely the user's responsibility.", + "References": "https://cloud.google.com/iam/docs/understanding-service-accounts#managing_service_account_keys:https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/keys/list:https://cloud.google.com/iam/docs/service-accounts" + } + ] + }, + { + "Id": "1.18", + "Description": "Google Cloud Functions allow you to host serverless code that is executed when an event is triggered, without requiring the management of a host operating system. These functions can also store environment variables to be used by the code that may contain authentication or other information that needs to remain confidential.", + "Checks": [], + "Attributes": [ + { + "Section": "1. Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Google Cloud Functions allow you to host serverless code that is executed when an event is triggered, without requiring the management of a host operating system. These functions can also store environment variables to be used by the code that may contain authentication or other information that needs to remain confidential.", + "RationaleStatement": "It is recommended to use Secret Manager, because environment variables are stored unencrypted and are accessible to all users who have access to the code.", + "ImpactStatement": "There should be no impact on the Cloud Function. There are minor costs to the Secret Manager API after 10,000 requests a month, as well as for high use of other functions. Modifying the Cloud Function to use the Secret Manager may prevent it from running to completion.", + "RemediationProcedure": "Enable Secret Manager API for your Project\n\n**From Google Cloud Console**\n1. 
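Editor's note (not CIS text): returning to the key-rotation audit (1.7) above, the 90-day check can be scripted. A sketch assuming GNU date and gcloud's timestamp comparison in `--filter`:

```bash
# Sketch: list user-managed keys created more than 90 days ago.
# CUTOFF computation assumes GNU date.
CUTOFF=$(date -u -d "90 days ago" +%Y-%m-%dT%H:%M:%SZ)
for sa in $(gcloud iam service-accounts list --format="value(email)"); do
  gcloud iam service-accounts keys list --iam-account="$sa" --managed-by=user \
    --filter="validAfterTime<${CUTOFF}" --format="table(name,validAfterTime)"
done
```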
Within the project you wish to enable, select the Navigation hamburger menu in the top left. Hover over 'APIs & Services', then select 'Enabled APIs & Services' in the menu that opens up.\n2. Click the button '+ Enable APIs and Services'\n3. In the Search bar, search for 'Secret Manager API' and select it.\n4. Click the blue box that says 'Enable'.\n\n**From Google Cloud CLI**\n1. Within the project you wish to enable the API in, run the following command.\n```\ngcloud services enable secretmanager.googleapis.com\n```\n\nReviewing Environment Variables That Should Be Migrated to Secret Manager\n\n**From Google Cloud Console**\n1. Log in to the Google Cloud Web Portal (https://console.cloud.google.com/)\n1. Go to Cloud Functions\n1. Click on a function name from the list\n1. Click on Edit and review the Runtime environment for variables that should be secrets. Leave this list open for the next step.\n\n**From Google Cloud CLI**\n1. To view a list of your cloud functions run\n```\ngcloud functions list\n```\n2. For each cloud function run the following command.\n```\ngcloud functions describe FUNCTION_NAME\n```\n3. Review the settings of the buildEnvironmentVariables and environmentVariables. Keep this information for the next step.\n\nMigrating Environment Variables to Secrets within the Secret Manager\n\n**From Google Cloud Console**\n1. Go to the Secret Manager page in the Cloud Console.\n1. On the Secret Manager page, click Create Secret.\n1. On the Create secret page, under Name, enter the name of the Environment Variable you are replacing. This will then be the Secret Variable you will reference in your code.\n1. You will also need to add a version. This is the actual value of the variable that will be referenced from the code. To add a secret version when creating the initial secret, in the Secret value field, enter the value from the Environment Variable you are replacing.\n1. Leave the Regions section unchanged.\n1. Click the Create secret button.\n1. Repeat for all Environment Variables\n\n**From Google Cloud CLI**\n1. Run the following command, using the name of the Environment Variable you are replacing as `SECRET_NAME`. It is most secure to point this command to a file with the Environment Variable value located in it, as if you entered it via the command line it would show up in your shell's command history.\n```\ngcloud secrets create SECRET_NAME --data-file=\"/path/to/file.txt\"\n```\n\nGranting your Runtime's Service Account Access to Secrets\n\n**From Google Cloud Console**\n1. Within the project containing your runtime, log in with an account that has the 'roles/secretmanager.secretAccessor' permission. \n2. Select the Navigation hamburger menu in the top left. Hover over 'Security', then select 'Secret Manager' in the menu that opens up.\n3. Click the name of a secret listed in this screen.\n4. If it is not already open, click Show Info Panel in this screen to open the panel.\n5. In the info panel, click Add principal.\n6. In the New principals field, enter the service account your function uses for its identity. (If you need help locating or updating your runtime's service account, please see the 'docs/securing/function-identity#runtime_service_account' reference.)\n7. In the Select a role dropdown, choose Secret Manager and then Secret Manager Secret Accessor.\n\n**From Google Cloud CLI**\nAs of the time of writing, using Google CLI to list Runtime variables is only in beta. 
Because this is likely to change, we are not including it here.\n\nModifying the Code to use the Secrets in Secret Manager\n\n**From Google Cloud Console**\nThis depends heavily on which language your runtime is in. For the sake of brevity of this recommendation, please see the '/docs/creating-and-accessing-secrets#access' reference for language-specific instructions.\n\n**From Google Cloud CLI**\nThis depends heavily on which language your runtime is in. For the sake of brevity of this recommendation, please see the '/docs/creating-and-accessing-secrets#access' reference for language-specific instructions.\n\nDeleting the Insecure Environment Variables\n\n**Be certain to do this step last.** Removing variables from code actively referencing them will prevent it from completing successfully.\n\n**From Google Cloud Console**\n1. Select the Navigation hamburger menu in the top left. Under the heading 'Serverless', select 'Cloud Functions' in the menu that opens up.\n1. Click the name of a function. Click Edit.\n1. Click Runtime, build and connections settings to expand the advanced configuration options.\n1. Click 'Security'. Hover over the secret you want to remove, then click 'Delete'.\n1. Click Next. Click Deploy. The latest version of the runtime will now reference the secrets in Secret Manager.\n\n**From Google Cloud CLI**\n```\ngcloud functions deploy FUNCTION_NAME --remove-env-vars ENV_VAR_NAMES\n```\nIf you need to find the env vars to remove, they are in the output of the 'gcloud functions describe FUNCTION_NAME' step that was run earlier.", + "AuditProcedure": "Determine if Confidential Information is Stored in your Functions in Cleartext\n\n**From Google Cloud Console**\n1. Within the project you wish to audit, select the Navigation hamburger menu in the top left. Scroll down to the heading 'Serverless', then select 'Cloud Functions'\n1. Click on a function name from the list\n1. Open the Variables tab and you will see both buildEnvironmentVariables and environmentVariables\n1. Review whether the variables contain secrets\n1. Repeat steps 3-5 until all functions are reviewed\n\n**From Google Cloud CLI**\n1. To view a list of your cloud functions run\n```\ngcloud functions list\n```\n2. For each cloud function in the list run the following command.\n```\ngcloud functions describe FUNCTION_NAME\n```\n3. Review the settings of the buildEnvironmentVariables and environmentVariables. Determine if this is data that should not be publicly accessible.\n\nDetermine if Secret Manager API is 'Enabled' for your Project\n\n**From Google Cloud Console**\n1. Within the project you wish to audit, select the Navigation hamburger menu in the top left. Hover over 'APIs & Services', then select 'Enabled APIs & Services' in the menu that opens up.\n1. Click the button '+ Enable APIs and Services'\n1. In the Search bar, search for 'Secret Manager API' and select it.\n1. If it is enabled, the blue box that normally says 'Enable' will instead say 'Manage'.\n\n**From Google Cloud CLI**\n1. Within the project you wish to audit, run the following command.\n```\ngcloud services list\n```\n2. If 'Secret Manager API' is in the list, it is enabled.", + "AdditionalInformation": "There are slight additional costs to using the Secret Manager API. 
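Editor's note (not CIS text): the migration steps above, condensed into one hedged CLI sketch. `SECRET_NAME`, `MY_VAR`, `FUNCTION_NAME`, `PROJECT_ID`, and the App Engine default runtime service account are illustrative placeholders:

```bash
# Sketch: enable the API, create the secret, grant the runtime identity
# access, then drop the now-migrated plaintext environment variable.
gcloud services enable secretmanager.googleapis.com
gcloud secrets create SECRET_NAME --data-file="/path/to/file.txt"
gcloud secrets add-iam-policy-binding SECRET_NAME \
  --member="serviceAccount:PROJECT_ID@appspot.gserviceaccount.com" \
  --role="roles/secretmanager.secretAccessor"
gcloud functions deploy FUNCTION_NAME --remove-env-vars MY_VAR
```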
Review the documentation to determine your organization's needs.", + "References": "https://cloud.google.com/functions/docs/configuring/env-var#managing_secrets:https://cloud.google.com/secret-manager/docs/overview" + } + ] + }, + { + "Id": "2.15", + "Description": "GCP Access Approval enables you to require your organization's explicit approval whenever Google support tries to access your projects. You can then select users within your organization who can approve these requests by giving them a security role in IAM. All access requests display which Google Employee requested them in an email or Pub/Sub message that you can choose to approve. This adds an additional control and logging of who in your organization approved/denied these requests.", + "Checks": [ + "iam_account_access_approval_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "GCP Access Approval enables you to require your organization's explicit approval whenever Google support tries to access your projects. You can then select users within your organization who can approve these requests by giving them a security role in IAM. All access requests display which Google Employee requested them in an email or Pub/Sub message that you can choose to approve. This adds an additional control and logging of who in your organization approved/denied these requests.", + "RationaleStatement": "Controlling access to your information is one of the foundations of information security. Google Employees do have access to your organization's projects for support reasons. With Access Approval, organizations can then be certain that their information is accessed by only approved Google Personnel.", + "ImpactStatement": "To use Access Approval, your organization will need to have enabled Access Transparency and have one of the following support levels: Enhanced or Premium. There will be subscription costs associated with these support levels, as well as increased storage costs for storing the logs. You will also not be able to turn off Access Transparency, which Access Approval depends on, yourself. To do so you will need to submit a service request to Google Cloud Support. There will also be additional overhead in managing user permissions. There may also be a potential delay in support times as Google Personnel will have to wait for their access to be approved.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. From the Google Cloud Home, within the project you wish to enable, click on the Navigation hamburger menu in the top left. Hover over the `Security` Menu. Select `Access Approval` in the middle of the column that opens. \n\n2. The status will be displayed here. On this screen, there is an option to click `Enroll`. If it is greyed out and you see an error bar at the top of the screen that says `Access Transparency is not enabled`, please view the corresponding reference within this section to enable it.\n\n3. In the second screen click `Enroll`.\n\n**Grant an IAM Group or User the role with permissions to add users as Access Approval message recipients**\n\n1. From the Google Cloud Home, within the project you wish to enable, click on the Navigation hamburger menu in the top left. Hover over the `IAM and Admin`. Select `IAM` in the middle of the column that opens. \n\n2. Click the blue button that says `+ ADD` at the top of the screen.\n\n3. 
In the `principals` field, select a user or group by typing in their associated email address.\n\n4. Click on the role field to expand it. In the filter field enter `Access Approval Approver` and select it.\n\n5. Click `Save`.\n\n**Add a Group or User as an Approver for Access Approval Requests**\n\n1. As a user with the `Access Approval Approver` permission, within the project where you wish to add an email address to which requests will be sent, click on the Navigation hamburger menu in the top left. Hover over the `Security` Menu. Select `Access Approval` in the middle of the column that opens. \n\n2. Click `Manage Settings`\n\n3. Under `Set up approval notifications`, enter the email address associated with a Google Cloud User or Group you wish to send Access Approval requests to. All future access approvals will be sent as emails to this address.\n\n**From Google Cloud CLI**\n\n1. To update all services in an entire project, run the following command from an account that has permissions as an 'Approver for Access Approval Requests'\n\n```\ngcloud access-approval settings update --project=PROJECT_ID --enrolled_services=all --notification_emails='EMAIL@DOMAIN.COM'\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n**Determine if Access Transparency is Enabled as it is a Dependency**\n\n1. From the Google Cloud Home inside the project you wish to audit, click on the Navigation hamburger menu in the top left. Hover over the `IAM & Admin` Menu. Select `settings` in the middle of the column that opens.\n\n2. The status should be `Enabled` under the heading `Access Transparency`\n\n**Determine if Access Approval is Enabled**\n\n1. From the Google Cloud Home, within the project you wish to check, click on the Navigation hamburger menu in the top left. Hover over the `Security` Menu. Select `Access Approval` in the middle of the column that opens. \n\n2. The status will be displayed here. If you see a screen saying you need to enroll in Access Approval, it is not enabled.\n\n**From Google Cloud CLI**\n\n**Determine if Access Approval is Enabled**\n1. From within the project you wish to audit, run the following command.\n```\ngcloud access-approval settings get\n```\n2. The status will be displayed in the output.\n\nIf Access Approval is not enabled, you should get this output:\n```\nAPI accessapproval.googleapis.com not enabled on project -----. Would you like to enable and retry (this will take a few minutes)? (y/N)?\n```\nAfter entering `Y` if you get the following output, it means that `Access Transparency` is not enabled:\n```\nERROR: (gcloud.access-approval.settings.get) FAILED_PRECONDITION: Precondition check failed.\n```", + "AdditionalInformation": "The recipients of Access Requests will also need to be logged into a Google Cloud account associated with an email address in this list. To approve requests, they can click approve within the email. 
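Editor's note (not CIS text): the `settings get` audit above can be swept across all projects. A minimal sketch, assuming the caller can list projects:

```bash
# Sketch: report projects where Access Approval settings cannot be read,
# i.e. where it is likely not configured or its API is not enabled.
for p in $(gcloud projects list --format="value(projectId)"); do
  gcloud access-approval settings get --project="$p" >/dev/null 2>&1 \
    || echo "Access Approval not configured for project: $p"
done
```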
Or they can view requests on the Access Approval page within the Security submenu.", + "References": "https://cloud.google.com/cloud-provider-access-management/access-approval/docs:https://cloud.google.com/cloud-provider-access-management/access-approval/docs/overview:https://cloud.google.com/cloud-provider-access-management/access-approval/docs/quickstart-custom-key:https://cloud.google.com/cloud-provider-access-management/access-approval/docs/supported-services:https://cloud.google.com/cloud-provider-access-management/access-approval/docs/view-historical-requests" + } + ] + }, + { + "Id": "2.13", + "Description": "GCP Cloud Asset Inventory is a service that provides a historical view of GCP resources and IAM policies through a time-series database. The information recorded includes metadata on Google Cloud resources, metadata on policies set on Google Cloud projects or resources, and runtime information gathered within a Google Cloud resource.", + "Checks": [ + "serviceusage_cloudasset_inventory_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "GCP Cloud Asset Inventory is a service that provides a historical view of GCP resources and IAM policies through a time-series database. The information recorded includes metadata on Google Cloud resources, metadata on policies set on Google Cloud projects or resources, and runtime information gathered within a Google Cloud resource.", + "RationaleStatement": "The GCP resources and IAM policies captured by GCP Cloud Asset Inventory enable security analysis, resource change tracking, and compliance auditing.\n\nIt is recommended GCP Cloud Asset Inventory be enabled for all GCP projects.", + "ImpactStatement": "", + "RemediationProcedure": "**From Google Cloud Console**\n\nEnable the Cloud Asset API:\n\n1. Go to `API & Services/Library` by visiting https://console.cloud.google.com/apis/library(https://console.cloud.google.com/apis/library)\n2. Search for `Cloud Asset API` and select the result for _Cloud Asset API_\n3. Click the `ENABLE` button.\n\n**From Google Cloud CLI**\n\nEnable the Cloud Asset API:\n\n1. Enable the Cloud Asset API through the services interface:\n```\ngcloud services enable cloudasset.googleapis.com\n```", + "AuditProcedure": "**From Google Cloud Console**\n\nEnsure that the Cloud Asset API is enabled:\n\n1. Go to `API & Services/Library` by visiting https://console.cloud.google.com/apis/library(https://console.cloud.google.com/apis/library)\n2. Search for `Cloud Asset API` and select the result for _Cloud Asset API_\n3. Ensure that `API Enabled` is displayed.\n\n**From Google Cloud CLI**\n\nEnsure that the Cloud Asset API is enabled:\n\n1. Query enabled services:\n```\ngcloud services list --enabled --filter=name:cloudasset.googleapis.com\n```\nIf the API is listed, then it is enabled. If the response is `Listed 0 items`, the API is not enabled.", + "AdditionalInformation": "Additional info\n- Cloud Asset Inventory only keeps a five-week history of Google Cloud asset metadata. 
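Editor's note (not CIS text): the 2.13 audit above can be swept across all accessible projects. A minimal sketch using only the commands already cited:

```bash
# Sketch: flag projects where the Cloud Asset API is not enabled.
for p in $(gcloud projects list --format="value(projectId)"); do
  enabled=$(gcloud services list --enabled --project="$p" \
              --filter="name:cloudasset.googleapis.com" --format="value(name)")
  [ -z "$enabled" ] && echo "Cloud Asset API not enabled in: $p"
done
```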
If a longer history is desired, automation to export the history to Cloud Storage or BigQuery should be evaluated.", + "References": "https://cloud.google.com/asset-inventory/docs" + } + ] + }, + { + "Id": "2.4", + "Description": "In order to prevent unnecessary project ownership assignments to users/service-accounts and further misuses of projects and resources, all `roles/Owner` assignments should be monitored.\n\nMembers (users/Service-Accounts) with a role assignment to primitive role `roles/Owner` are project owners.\n\nThe project owner has all the privileges on the project the role belongs to. These are summarized below:\n- All viewer permissions on all GCP Services within the project\n- Permissions for actions that modify the state of all GCP services within the project\n- Manage roles and permissions for a project and all resources within the project\n- Set up billing for a project\n\nGranting the owner role to a member (user/Service-Account) will allow that member to modify the Identity and Access Management (IAM) policy. Therefore, grant the owner role only if the member has a legitimate purpose to manage the IAM policy. This is because the project IAM policy contains sensitive access control data. Having a minimal set of users allowed to manage IAM policy will simplify any auditing that may be necessary.", + "Checks": [ + "logging_log_metric_filter_and_alert_for_project_ownership_changes_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "In order to prevent unnecessary project ownership assignments to users/service-accounts and further misuses of projects and resources, all `roles/Owner` assignments should be monitored.\n\nMembers (users/Service-Accounts) with a role assignment to primitive role `roles/Owner` are project owners.\n\nThe project owner has all the privileges on the project the role belongs to. These are summarized below:\n- All viewer permissions on all GCP Services within the project\n- Permissions for actions that modify the state of all GCP services within the project\n- Manage roles and permissions for a project and all resources within the project\n- Set up billing for a project\n\nGranting the owner role to a member (user/Service-Account) will allow that member to modify the Identity and Access Management (IAM) policy. Therefore, grant the owner role only if the member has a legitimate purpose to manage the IAM policy. This is because the project IAM policy contains sensitive access control data. Having a minimal set of users allowed to manage IAM policy will simplify any auditing that may be necessary.", + "RationaleStatement": "Project ownership has the highest level of privileges on a project. To avoid misuse of project resources, the project ownership assignment/change actions mentioned above should be monitored and alerted to concerned recipients.\n- Sending project ownership invites\n- Acceptance/Rejection of project ownership invite by user\n- Adding `role\\Owner` to a user/service-account\n- Removing a user/Service account from `role\\Owner`", + "ImpactStatement": "Enabling of logging may result in your project being charged for the additional logs usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n**Create the prescribed log metric:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics) and click \"CREATE METRIC\".\n\n2. 
Click the down arrow symbol on the `Filter Bar` at the rightmost corner and select `Convert to Advanced Filter`.\n\n3. Clear any text and add: \n\n```\n(protoPayload.serviceName=\"cloudresourcemanager.googleapis.com\") \nAND (ProjectOwnership OR projectOwnerInvitee) \nOR (protoPayload.serviceData.policyDelta.bindingDeltas.action=\"REMOVE\" \nAND protoPayload.serviceData.policyDelta.bindingDeltas.role=\"roles/owner\") \nOR (protoPayload.serviceData.policyDelta.bindingDeltas.action=\"ADD\" \nAND protoPayload.serviceData.policyDelta.bindingDeltas.role=\"roles/owner\")\n```\n\n4. Click `Submit Filter`. The logs display based on the filter text entered by the user.\n\n5. In the `Metric Editor` menu on the right, fill out the name field. Set `Units` to `1` (default) and the `Type` to `Counter`. This ensures that the log metric counts the number of log entries matching the advanced logs query.\n\n6. Click `Create Metric`. \n\n**Create the prescribed Alert Policy:** \n\n1. Identify the newly created metric under the section `User-defined Metrics` at https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. Click the 3-dot icon in the rightmost column for the desired metric and select `Create alert from Metric`. A new page opens.\n\n3. Fill out the alert policy configuration and click `Save`. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the project:\n```\nSet `Aggregator` to `Count`\n\nSet `Configuration`:\n\n- Condition: above\n\n- Threshold: 0\n\n- For: most recent value\n```\n\n4. Configure the desired notifications channels in the section `Notifications`.\n\n5. Name the policy and click `Save`.\n\n**From Google Cloud CLI**\n\nCreate a prescribed Log Metric:\n- Use the command: gcloud beta logging metrics create \n- Reference for Command Usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create\n\nCreate prescribed Alert Policy \n- Use the command: gcloud alpha monitoring policies create\n- Reference for Command Usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create", + "AuditProcedure": "**From Google Cloud Console**\n\n**Ensure that the prescribed log metric is present:**\n\n1. Go to `Logging/Log-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. In the `User-defined Metrics` section, ensure that at least one metric `METRIC_NAME` is present with filter text:\n\n```\n(protoPayload.serviceName=\"cloudresourcemanager.googleapis.com\") \nAND (ProjectOwnership OR projectOwnerInvitee) \nOR (protoPayload.serviceData.policyDelta.bindingDeltas.action=\"REMOVE\" \nAND protoPayload.serviceData.policyDelta.bindingDeltas.role=\"roles/owner\") \nOR (protoPayload.serviceData.policyDelta.bindingDeltas.action=\"ADD\" \nAND protoPayload.serviceData.policyDelta.bindingDeltas.role=\"roles/owner\")\n```\n\n**Ensure that the prescribed Alerting Policy is present:**\n\n3. Go to `Alerting` by visiting https://console.cloud.google.com/monitoring/alerting(https://console.cloud.google.com/monitoring/alerting).\n\n4. Under the `Policies` section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. 
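Editor's note (not CIS text): the bare `gcloud beta logging metrics create` reference above, expanded into a hedged sketch; `METRIC_NAME` is a placeholder and the filter is the one prescribed in this recommendation:

```bash
# Sketch: create the prescribed ownership-change log metric via the CLI.
gcloud logging metrics create METRIC_NAME \
  --description="Project ownership changes" \
  --log-filter='(protoPayload.serviceName="cloudresourcemanager.googleapis.com")
AND (ProjectOwnership OR projectOwnerInvitee)
OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="REMOVE"
AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner")
OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="ADD"
AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner")'
```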
For example, `Violates when: Any logging.googleapis.com/user/METRIC_NAME stream` `is above a threshold of zero(0) for greater than zero(0) seconds` means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for your organization.\n\n5. Ensure that the appropriate notifications channels have been set up.\n\n**From Google Cloud CLI**\n\n**Ensure that the prescribed log metric is present:**\n\n1. List the log metrics:\n```\ngcloud logging metrics list --format json\n```\n2. Ensure that the output contains at least one metric with filter set to: \n```\n(protoPayload.serviceName=\"cloudresourcemanager.googleapis.com\") \nAND (ProjectOwnership OR projectOwnerInvitee) \nOR (protoPayload.serviceData.policyDelta.bindingDeltas.action=\"REMOVE\" \nAND protoPayload.serviceData.policyDelta.bindingDeltas.role=\"roles/owner\") \nOR (protoPayload.serviceData.policyDelta.bindingDeltas.action=\"ADD\" \nAND protoPayload.serviceData.policyDelta.bindingDeltas.role=\"roles/owner\")\n```\n\n3. Note the value of the property `metricDescriptor.type` for the identified metric, in the format `logging.googleapis.com/user/METRIC_NAME`.\n\n**Ensure that the prescribed alerting policy is present:**\n\n4. List the alerting policies:\n```\ngcloud alpha monitoring policies list --format json\n```\n5. Ensure that the output contains at least one alert policy where:\n- `conditions.conditionThreshold.filter` is set to `metric.type=\\\"logging.googleapis.com/user/METRIC_NAME\\\"`\n- AND `enabled` is set to `true`", + "AdditionalInformation": "1. Project ownership assignments for a user cannot be done using the gcloud utility as assigning project ownership to a user requires sending, and the user accepting, an invitation. \n\n2. Project Ownership assignment to a service account does not send any invites. SetIAMPolicy to `role/owner` is directly performed on service accounts.", + "References": "https://cloud.google.com/logging/docs/logs-based-metrics/:https://cloud.google.com/monitoring/custom-metrics/:https://cloud.google.com/monitoring/alerts/:https://cloud.google.com/logging/docs/reference/tools/gcloud-logging" + } + ] + }, + { + "Id": "2.16", + "Description": "Logging enabled on an HTTPS Load Balancer will show all network traffic and its destination.", + "Checks": [ + "compute_loadbalancer_logging_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "Logging enabled on an HTTPS Load Balancer will show all network traffic and its destination.", + "RationaleStatement": "Logging will allow you to view HTTPS network traffic to your web applications.", + "ImpactStatement": "On high-use systems with a high percentage sample rate, the logging file may grow to high capacity in a short amount of time. Ensure that the sample rate is set appropriately so that storage costs are not exorbitant.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. From Google Cloud home open the Navigation Menu in the top left.\n\n1. Under the `Networking` heading select `Network services`.\n\n1. Select the HTTPS load-balancer you wish to audit.\n\n1. Select `Edit` then `Backend Configuration`. \n\n1. Select `Edit` on the corresponding backend service.\n\n1. Click `Enable Logging`.\n\n1. Set `Sample Rate` to a desired value. This is a percentage expressed as a decimal; 1.0 is 100%.\n\n**From Google Cloud CLI**\n\n1. 
Run the following command\n\n```\ngcloud compute backend-services update --region=REGION --enable-logging --logging-sample-rate=\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. From Google Cloud home open the Navigation Menu in the top left.\n\n1. Under the `Networking` heading select `Network services`.\n\n1. Select the HTTPS load-balancer you wish to audit.\n\n1. Select `Edit` then `Backend Configuration`. \n\n1. Select `Edit` on the corresponding backend service.\n\n1. Ensure that `Enable Logging` is selected. Also ensure that `Sample Rate` is set to an appropriate level for your needs.\n\n**From Google Cloud CLI**\n\n1. Run the following command\n\n```\ngcloud compute backend-services describe \n```\n\n1. Ensure that ```enable-logging``` is enabled and ```sample rate``` is set to your desired level.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/load-balancing/:https://cloud.google.com/load-balancing/docs/https/https-logging-monitoring#gcloud:-global-mode:https://cloud.google.com/sdk/gcloud/reference/compute/backend-services/" + } + ] + }, + { + "Id": "2.1", + "Description": "It is recommended that Cloud Audit Logging is configured to track all admin activities and read, write access to user data.", + "Checks": [ + "iam_audit_logs_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended that Cloud Audit Logging is configured to track all admin activities and read, write access to user data.", + "RationaleStatement": "Cloud Audit Logging maintains two audit logs for each project, folder, and organization: Admin Activity and Data Access.\n\n1. Admin Activity logs contain log entries for API calls or other administrative actions that modify the configuration or metadata of resources. Admin Activity audit logs are enabled for all services and cannot be configured.\n\n2. Data Access audit logs record API calls that create, modify, or read user-provided data. These are disabled by default and should be enabled.\n\n There are three kinds of Data Access audit log information:\n\n - Admin read: Records operations that read metadata or configuration information. Admin Activity audit logs record writes of metadata and configuration information that cannot be disabled.\n - Data read: Records operations that read user-provided data.\n - Data write: Records operations that write user-provided data.\n\nIt is recommended to have an effective default audit config configured in such a way that:\n\n1. logtype is set to DATA_READ (to log user activity tracking) and DATA_WRITES (to log changes/tampering to user data).\n\n2. audit config is enabled for all the services supported by the Data Access audit logs feature.\n\n3. Logs should be captured for all users, i.e., there are no exempted users in any of the audit config sections. This will ensure overriding the audit config will not contradict the requirement.", + "ImpactStatement": "There is no charge for Admin Activity audit logs.\nEnabling the Data Access audit logs might result in your project being charged for the additional logs usage.", + "RemediationProcedure": "**From Google Cloud Console**\n1. Go to `Audit Logs` by visiting https://console.cloud.google.com/iam-admin/audit(https://console.cloud.google.com/iam-admin/audit).\n2. 
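Editor's note (not CIS text): the 2.16 audit above can be scripted. A sketch with assumptions: `--global` scope is assumed (regional backend services would use `--region`), and the `logConfig.enable` field is read per the backend-service resource schema:

```bash
# Sketch: flag global backend services whose load balancer logging is off.
for bs in $(gcloud compute backend-services list --global --format="value(name)"); do
  enabled=$(gcloud compute backend-services describe "$bs" --global \
              --format="value(logConfig.enable)")
  [ "$enabled" != "True" ] && echo "Logging disabled for backend service: $bs"
done
```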
Follow the steps at https://cloud.google.com/logging/docs/audit/configure-data-access(https://cloud.google.com/logging/docs/audit/configure-data-access) to enable audit logs for all Google Cloud services. Ensure that no exemptions are allowed.\n\n**From Google Cloud CLI**\n\n1. To read the project's IAM policy and store it in a file run a command:\n\n```\ngcloud projects get-iam-policy PROJECT_ID > /tmp/project_policy.yaml\n```\n\nAlternatively, the policy can be set at the organization or folder level. If setting the policy at the organization level, it is not necessary to also set it for each folder or project.\n\n```\ngcloud organizations get-iam-policy ORGANIZATION_ID > /tmp/org_policy.yaml\ngcloud resource-manager folders get-iam-policy FOLDER_ID > /tmp/folder_policy.yaml\n```\n\n2. Edit the policy in /tmp/project_policy.yaml (or the organization or folder policy file downloaded above), adding or changing only the audit logs configuration to:\n**Note: Admin Activity Logs are enabled by default, and cannot be disabled. So they are not listed in these configuration changes.**\n```\nauditConfigs:\n- auditLogConfigs:\n - logType: DATA_WRITE\n - logType: DATA_READ\n service: allServices\n```\n\n**Note:** `exemptedMembers:` is not set as audit logging should be enabled for all users\n\n3. To write new IAM policy run command:\n\n```\ngcloud organizations set-iam-policy ORGANIZATION_ID /tmp/org_policy.yaml\ngcloud resource-manager folders set-iam-policy FOLDER_ID /tmp/folder_policy.yaml\ngcloud projects set-iam-policy PROJECT_ID /tmp/project_policy.yaml\n```\n\nIf the preceding command reports a conflict with another change, then repeat these steps, starting with the first step.", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `Audit Logs` by visiting https://console.cloud.google.com/iam-admin/audit(https://console.cloud.google.com/iam-admin/audit).\n2. Ensure that Admin Read, Data Write, and Data Read are enabled for all Google Cloud services and that no exemptions are allowed.\n\n**From Google Cloud CLI**\n\n1. List the Identity and Access Management (IAM) policies for the project, folder, or organization: \n```\ngcloud organizations get-iam-policy ORGANIZATION_ID\ngcloud resource-manager folders get-iam-policy FOLDER_ID\ngcloud projects get-iam-policy PROJECT_ID\n```\n2. The policy should have a default auditConfigs section with the logType set to DATA_WRITE and DATA_READ for all services. Note that projects inherit settings from folders, which in turn inherit settings from the organization. When `projects get-iam-policy` is called, the result shows only the policies set in the project, not the policies inherited from the parent folder or organization. Nevertheless, if the parent folder has Cloud Audit Logging enabled, the project does as well. \n\nSample output for default audit configs may look like this:\n\n```\n auditConfigs:\n - auditLogConfigs:\n - logType: ADMIN_READ\n - logType: DATA_WRITE\n - logType: DATA_READ\n service: allServices\n```\n\n3. None of the auditConfigs sections should have the parameter \"exemptedMembers:\" set; this will ensure that logging is enabled for all users and no user is exempted.", + "AdditionalInformation": "- Log type `DATA_READ` is equally important to that of `DATA_WRITE` to track detailed user activities.\n- BigQuery Data Access logs are handled differently from other data access logs. BigQuery logs are enabled by default and cannot be disabled. 
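Editor's note (not CIS text): a quick machine-readable check of the 2.1 audit above; `PROJECT_ID` is a placeholder:

```bash
# Sketch: print the project's auditConfigs and any exempted members; the
# audit expects DATA_READ/DATA_WRITE for allServices and no exemptions.
gcloud projects get-iam-policy PROJECT_ID --format=json | jq '{
  auditConfigs: .auditConfigs,
  exemptions: [.auditConfigs[]?.auditLogConfigs[]? | select(.exemptedMembers)]
}'
```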
They do not count against logs allotment and cannot result in extra logs charges.", + "References": "https://cloud.google.com/logging/docs/audit/:https://cloud.google.com/logging/docs/audit/configure-data-access" + } + ] + }, + { + "Id": "2.12", + "Description": "Cloud DNS logging records the queries from the name servers within your VPC to Stackdriver. Logged queries can come from Compute Engine VMs, GKE containers, or other GCP resources provisioned within the VPC.", + "Checks": [ + "compute_network_dns_logging_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Cloud DNS logging records the queries from the name servers within your VPC to Stackdriver. Logged queries can come from Compute Engine VMs, GKE containers, or other GCP resources provisioned within the VPC.", + "RationaleStatement": "Security monitoring and forensics cannot depend solely on IP addresses from VPC flow logs, especially when considering the dynamic IP usage of cloud resources, HTTP virtual host routing, and other technology that can obscure the DNS name used by a client from the IP address. Monitoring of Cloud DNS logs provides visibility to DNS names requested by the clients within the VPC. These logs can be monitored for anomalous domain names and evaluated against threat intelligence.\n\nNote: For full capture of DNS, the firewall must block egress UDP/53 (DNS) and TCP/443 (DNS over HTTPS) to prevent clients from using an external DNS name server for resolution.", + "ImpactStatement": "Enabling of Cloud DNS logging might result in your project being charged for the additional logs usage.", + "RemediationProcedure": "**From Google Cloud CLI**\n\n**Add New DNS Policy With Logging Enabled**\n\nFor each VPC network that needs a DNS policy with logging enabled:\n```\ngcloud dns policies create enable-dns-logging --enable-logging --description=\"Enable DNS Logging\" --networks=VPC_NETWORK_NAME\n```\nThe VPC_NETWORK_NAME can be one or more networks in a comma-separated list\n\n**Enable Logging for Existing DNS Policy**\n\nFor each VPC network that has an existing DNS policy that needs logging enabled:\n```\ngcloud dns policies update POLICY_NAME --enable-logging --networks=VPC_NETWORK_NAME\n```\nThe VPC_NETWORK_NAME can be one or more networks in a comma-separated list", + "AuditProcedure": "**From Google Cloud CLI**\n\n1. List all VPC networks in a project:\n```\ngcloud compute networks list --format=\"table[box,title='All VPC Networks'](name:label='VPC Network Name')\"\n```\n2. List all DNS policies, logging enablement, and associated VPC networks:\n```\ngcloud dns policies list --flatten=\"networks\" --format=\"table[box,title='All DNS Policies By VPC Network'](name:label='Policy Name',enableLogging:label='Logging Enabled':align=center,networks.networkUrl.basename():label='VPC Network Name')\"\n```\nEach VPC Network should be associated with a DNS policy with logging enabled.", + "AdditionalInformation": "Additional Info\n- Only queries that reach a name server are logged. Cloud DNS resolvers cache responses; queries answered from caches, or direct queries to an external DNS resolver outside the VPC, are not logged.", + "References": "https://cloud.google.com/dns/docs/monitoring" + } + ] + }, + { + "Id": "2.3", + "Description": "Enabling retention policies on log buckets will protect logs stored in cloud storage buckets from being overwritten or accidentally deleted. 
It is recommended to set up retention policies and configure Bucket Lock on all storage buckets that are used as log sinks.", + "Checks": [ + "cloudstorage_bucket_log_retention_policy_lock" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "Enabling retention policies on log buckets will protect logs stored in cloud storage buckets from being overwritten or accidentally deleted. It is recommended to set up retention policies and configure Bucket Lock on all storage buckets that are used as log sinks.", + "RationaleStatement": "Logs can be exported by creating one or more sinks that include a log filter and a destination. As Cloud Logging receives new log entries, they are compared against each sink. If a log entry matches a sink's filter, then a copy of the log entry is written to the destination.\n\nSinks can be configured to export logs in storage buckets. It is recommended to configure a data retention policy for these cloud storage buckets and to lock the data retention policy; thus permanently preventing the policy from being reduced or removed. This way, if the system is ever compromised by an attacker or a malicious insider who wants to cover their tracks, the activity logs are definitely preserved for forensics and security investigations.", + "ImpactStatement": "Locking a bucket is an irreversible action. Once you lock a bucket, you cannot remove the retention policy from the bucket or decrease the retention period for the policy. You will then have to wait for the retention period for all items within the bucket before you can delete them, and then the bucket.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. If sinks are **not** configured, first follow the instructions in the recommendation: `Ensure that sinks are configured for all Log entries`.\n\n2. For each storage bucket configured as a sink, go to the Cloud Storage browser at `https://console.cloud.google.com/storage/browser/`.\n\n3. Select the Bucket Lock tab near the top of the page.\n\n4. In the Retention policy entry, click the Add Duration link. The `Set a retention policy` dialog box appears.\n\n5. Enter the desired length of time for the retention period and click `Save policy`.\n\n6. Set the `Lock status` for this retention policy to `Locked`.\n\n**From Google Cloud CLI**\n\n1. To list all sinks destined to storage buckets:\n```\ngcloud logging sinks list --folder=FOLDER_ID | --organization=ORGANIZATION_ID | --project=PROJECT_ID\n```\n2. For each storage bucket listed above, set a retention policy and lock it:\n```\ngsutil retention set TIME_DURATION gs://BUCKET_NAME\ngsutil retention lock gs://BUCKET_NAME\n```\n\nFor more information, visit https://cloud.google.com/storage/docs/using-bucket-lock#set-policy(https://cloud.google.com/storage/docs/using-bucket-lock#set-policy).", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Open the Cloud Storage browser in the Google Cloud Console by visiting https://console.cloud.google.com/storage/browser(https://console.cloud.google.com/storage/browser).\n\n2. In the Column display options menu, make sure `Retention policy` is checked.\n\n3. In the list of buckets, the retention period of each bucket is found in the `Retention policy` column. If the retention policy is locked, an image of a lock appears directly to the left of the retention period.\n\n**From Google Cloud CLI**\n\n1. 
To list all sinks destined to storage buckets:\n```\ngcloud logging sinks list --folder=FOLDER_ID | --organization=ORGANIZATION_ID | --project=PROJECT_ID\n```\n2. For every storage bucket listed above, verify that retention policies and Bucket Lock are enabled:\n```\ngsutil retention get gs://BUCKET_NAME\n```\n\nFor more information, see https://cloud.google.com/storage/docs/using-bucket-lock#view-policy(https://cloud.google.com/storage/docs/using-bucket-lock#view-policy).", + "AdditionalInformation": "Caution: Locking a retention policy is an irreversible action. Once locked, you must delete the entire bucket in order to \"remove\" the bucket's retention policy. However, before you can delete the bucket, you must be able to delete all the objects in the bucket, which itself is only possible if all the objects have reached the retention period set by the retention policy.", + "References": "https://cloud.google.com/storage/docs/bucket-lock:https://cloud.google.com/storage/docs/using-bucket-lock:https://cloud.google.com/storage/docs/bucket-lock" + } + ] + }, + { + "Id": "2.2", + "Description": "It is recommended to create a sink that will export copies of all the log entries. This can help aggregate logs from multiple projects and export them to a Security Information and Event Management (SIEM).", + "Checks": [ + "logging_sink_created" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to create a sink that will export copies of all the log entries. This can help aggregate logs from multiple projects and export them to a Security Information and Event Management (SIEM).", + "RationaleStatement": "Log entries are held in Cloud Logging. To aggregate logs, export them to a SIEM. To keep them longer, it is recommended to set up a log sink. Exporting involves writing a filter that selects the log entries to export, and choosing a destination in Cloud Storage, BigQuery, or Cloud Pub/Sub. The filter and destination are held in an object called a sink. To ensure all log entries are exported to sinks, ensure that there is no filter configured for a sink.\nSinks can be created in projects, organizations, folders, and billing accounts.", + "ImpactStatement": "There are no costs or limitations in Cloud Logging for exporting logs, but the export destinations charge for storing or transmitting the log data.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `Logs Router` by visiting https://console.cloud.google.com/logs/router(https://console.cloud.google.com/logs/router).\n\n2. Click on the arrow symbol with `CREATE SINK` text.\n\n3. Fill out the fields for `Sink details`.\n\n4. Choose Cloud Logging bucket in the Select sink destination drop down menu.\n\n5. Choose a log bucket in the next drop down menu.\n\n6. If an inclusion filter is not provided for this sink, all ingested logs will be routed to the destination provided above. This may result in higher than expected resource usage.\n\n7. 
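Editor's note (not CIS text): the two 2.3 audit commands above can be combined. A minimal sketch for the current project, assuming Cloud Storage sink destinations:

```bash
# Sketch: check retention/lock status of each Cloud Storage destination
# among this project's log sinks.
for dest in $(gcloud logging sinks list --format="value(destination)" \
                | grep '^storage.googleapis.com/'); do
  gsutil retention get "gs://${dest#storage.googleapis.com/}"
done
```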
Click `Create Sink`.\n\nFor more information, see https://cloud.google.com/logging/docs/export/configure_export_v2#dest-create(https://cloud.google.com/logging/docs/export/configure_export_v2#dest-create).\n\n**From Google Cloud CLI**\n\nTo create a sink to export all log entries in a Google Cloud Storage bucket: \n\n```\ngcloud logging sinks create SINK_NAME storage.googleapis.com/DESTINATION_BUCKET_NAME\n```\n\nSinks can be created for a folder or organization, which will include all projects.\n\n```\ngcloud logging sinks create SINK_NAME storage.googleapis.com/DESTINATION_BUCKET_NAME --include-children --folder=FOLDER_ID | --organization=ORGANIZATION_ID\n```\n\n**Note:** \n\n1. A sink created by the command-line above will export logs in storage buckets. However, sinks can be configured to export logs into BigQuery, or Cloud Pub/Sub, or `Custom Destination`.\n\n2. While creating a sink, the sink option `--log-filter` is not used to ensure the sink exports all log entries.\n\n3. A sink can be created at a folder or organization level that collects the logs of all the projects underneath by passing the option `--include-children` in the gcloud command.", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `Logs Router` by visiting https://console.cloud.google.com/logs/router(https://console.cloud.google.com/logs/router).\n\n2. For every sink, click the 3-dot button for Menu options and select `View sink details`.\n\n3. Ensure there is at least one sink with an `empty` Inclusion filter.\n\n4. Additionally, ensure that the resource configured as `Destination` exists.\n\n**From Google Cloud CLI**\n\n1. Ensure that a sink with an `empty filter` exists. List the sinks for the project, folder or organization. If sinks are configured at a folder or organization level, they do not need to be configured for each project:\n```\ngcloud logging sinks list --folder=FOLDER_ID | --organization=ORGANIZATION_ID | --project=PROJECT_ID\n```\n\nThe output should list at least one sink with an `empty filter`.\n\n2. Additionally, ensure that the resource configured as `Destination` exists.\n\nSee https://cloud.google.com/sdk/gcloud/reference/beta/logging/sinks/list(https://cloud.google.com/sdk/gcloud/reference/beta/logging/sinks/list) for more information.", + "AdditionalInformation": "For Command-Line Audit and Remediation, the sink destination of type `Cloud Storage Bucket` is considered. However, the destination could be configured to\n`Cloud Storage Bucket` or `BigQuery` or `Cloud Pub\\Sub` or `Custom Destination`. Command Line Interface commands would change accordingly.", + "References": "https://cloud.google.com/logging/docs/reference/tools/gcloud-logging:https://cloud.google.com/logging/quotas:https://cloud.google.com/logging/docs/routing/overview:https://cloud.google.com/logging/docs/export/using_exported_logs:https://cloud.google.com/logging/docs/export/configure_export_v2:https://cloud.google.com/logging/docs/export/aggregated_exports:https://cloud.google.com/sdk/gcloud/reference/beta/logging/sinks/list" + } + ] + }, + { + "Id": "2.5", + "Description": "Google Cloud Platform (GCP) services write audit log entries to the Admin Activity and Data Access logs to help answer the questions of, \"who did what, where, and when?\" within GCP projects.\n\nCloud audit logging records information that includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by GCP services. 
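Editor's note (not CIS text): a quick machine-readable form of the 2.2 "empty filter" audit above:

```bash
# Sketch: list sinks whose inclusion filter is empty, i.e. sinks that
# export all log entries, as the 2.2 audit requires.
gcloud logging sinks list --format=json \
  | jq -r '.[] | select((.filter // "") == "") | .name'
```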
Cloud audit logging provides a history of GCP API calls for an account, including API calls made via the console, SDKs, command-line tools, and other GCP services.", + "Checks": [ + "logging_log_metric_filter_and_alert_for_audit_configuration_changes_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Google Cloud Platform (GCP) services write audit log entries to the Admin Activity and Data Access logs to help answer the questions of, \"who did what, where, and when?\" within GCP projects.\n\nCloud audit logging records information that includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by GCP services. Cloud audit logging provides a history of GCP API calls for an account, including API calls made via the console, SDKs, command-line tools, and other GCP services.", + "RationaleStatement": "Admin activity and data access logs produced by cloud audit logging enable security analysis, resource change tracking, and compliance auditing.\n\nConfiguring the metric filter and alerts for audit configuration changes ensures the recommended state of audit configuration is maintained so that all activities in the project are audit-able at any point in time.", + "ImpactStatement": "Enabling of logging may result in your project being charged for the additional logs usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n**Create the prescribed log metric:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics) and click \"CREATE METRIC\".\n\n2. Click the down arrow symbol on the `Filter Bar` at the rightmost corner and select `Convert to Advanced Filter`.\n\n3. Clear any text and add: \n```\nprotoPayload.methodName=\"SetIamPolicy\" AND\nprotoPayload.serviceData.policyDelta.auditConfigDeltas:*\n```\n4. Click `Submit Filter`. The logs display based on the filter text entered by the user.\n\n5. In the `Metric Editor` menu on the right, fill out the name field. Set `Units` to `1` (default) and `Type` to `Counter`. This will ensure that the log metric counts the number of log entries matching the user's advanced logs query.\n\n6. Click `Create Metric`. \n\n**Create a prescribed Alert Policy:** \n\n1. Identify the new metric the user just created, under the section `User-defined Metrics` at https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. Click the 3-dot icon in the rightmost column for the new metric and select `Create alert from Metric`. A new page opens.\n\n3. Fill out the alert policy configuration and click `Save`. Choose the alerting threshold and configuration that makes sense for the organization. For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the project:\n```\nSet `Aggregator` to `Count`\n\nSet `Configuration`:\n\n- Condition: above\n\n- Threshold: 0\n\n- For: most recent value\n```\n4. Configure the desired notifications channels in the section `Notifications`.\n\n5. 
Name the policy and click `Save`.\n\n**From Google Cloud CLI**\n\nCreate a prescribed Log Metric:\n- Use the command: gcloud beta logging metrics create \n- Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create\n(https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create)\nCreate prescribed Alert Policy \n- Use the command: gcloud alpha monitoring policies create\n- Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create(https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create)", + "AuditProcedure": "**From Google Cloud Console**\n\n**Ensure the prescribed log metric is present:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. In the `User-defined Metrics` section, ensure that at least one metric `` is present with the filter text:\n```\nprotoPayload.methodName=\"SetIamPolicy\" AND\nprotoPayload.serviceData.policyDelta.auditConfigDeltas:*\n```\n**Ensure that the prescribed alerting policy is present:**\n\n3. Go to `Alerting` by visiting https://console.cloud.google.com/monitoring/alerting(https://console.cloud.google.com/monitoring/alerting).\n\n4. Under the `Policies` section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, `Violates when: Any logging.googleapis.com/user/ stream` `is above a threshold of 0 for greater than zero(0) seconds`, means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization.\n\n5. Ensure that appropriate notifications channels have been set up.\n\n**From Google Cloud CLI**\n\n**Ensure that the prescribed log metric is present:**\n\n1. List the log metrics:\n```\ngcloud beta logging metrics list --format json\n```\n2. Ensure that the output contains at least one metric with the filter set to: \n```\nprotoPayload.methodName=\"SetIamPolicy\" AND\nprotoPayload.serviceData.policyDelta.auditConfigDeltas:*\n```\n3. Note the value of the property `metricDescriptor.type` for the identified metric, in the format `logging.googleapis.com/user/`.\n\n**Ensure that the prescribed alerting policy is present:**\n\n4. List the alerting policies:\n```\ngcloud alpha monitoring policies list --format json\n```\n5. Ensure that the output contains at least one alert policy where:\n- `conditions.conditionThreshold.filter` is set to `metric.type=\\\"logging.googleapis.com/user/\\\"`\n- AND `enabled` is set to `true`", + "AdditionalInformation": "", + "References": "https://cloud.google.com/logging/docs/logs-based-metrics/:https://cloud.google.com/monitoring/custom-metrics/:https://cloud.google.com/monitoring/alerts/:https://cloud.google.com/logging/docs/reference/tools/gcloud-logging:https://cloud.google.com/logging/docs/audit/configure-data-access#getiampolicy-setiampolicy" + } + ] + }, + { + "Id": "2.10", + "Description": "It is recommended that a metric filter and alarm be established for Cloud Storage Bucket IAM changes.", + "Checks": [ + "logging_log_metric_filter_and_alert_for_bucket_permission_changes_enabled" + ], + "Attributes": [ + { + "Section": "2. 
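The CLI remediation above names the commands but not their arguments; a minimal sketch, assuming an illustrative metric name `audit-config-changes` and a pre-written policy definition file, could be:

```
# Log-based metric using the prescribed audit-configuration-change filter.
gcloud beta logging metrics create audit-config-changes \
  --description="Counts audit configuration changes" \
  --log-filter='protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:*'

# Alert policies are created from a JSON or YAML definition file.
gcloud alpha monitoring policies create --policy-from-file=policy.json
```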
Logging and Monitoring", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "It is recommended that a metric filter and alarm be established for Cloud Storage Bucket IAM changes.", + "RationaleStatement": "Monitoring changes to cloud storage bucket permissions may reduce the time needed to detect and correct permissions on sensitive cloud storage buckets and objects inside the bucket.", + "ImpactStatement": "Enabling of logging may result in your project being charged for the additional logs usage. These charges could be significant depending on the size of the organization.", + "RemediationProcedure": "**From Google Cloud Console**\n\n**Create the prescribed log metric:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics) and click \"CREATE METRIC\".\n\n2. Click the down arrow symbol on the `Filter Bar` at the rightmost corner and select `Convert to Advanced Filter`.\n\n3. Clear any text and add: \n```\nresource.type=\"gcs_bucket\" \nAND protoPayload.methodName=\"storage.setIamPermissions\"\n```\n4. Click `Submit Filter`. Display logs appear based on the filter text entered by the user.\n\n5. In the `Metric Editor` menu on the right, fill out the name field. Set `Units` to `1` (default) and `Type` to `Counter`. This ensures that the log metric counts the number of log entries matching the user's advanced logs query.\n\n6. Click `Create Metric`. \n\n**Create the prescribed Alert Policy:** \n\n1. Identify the newly created metric under the section `User-defined Metrics` at https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. Click the 3-dot icon in the rightmost column for the new metric and select `Create alert from Metric`. A new page appears.\n\n3. Fill out the alert policy configuration and click `Save`. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the project:\n```\nSet `Aggregator` to `Count`\n\nSet `Configuration`:\n\n- Condition: above\n\n- Threshold: 0\n\n- For: most recent value\n```\n\n4. Configure the desired notifications channels in the section `Notifications`.\n\n5. Name the policy and click `Save`.\n\n**From Google Cloud CLI**\n\nCreate the prescribed Log Metric:\n- Use the command: gcloud beta logging metrics create \n\nCreate the prescribed alert policy: \n- Use the command: gcloud alpha monitoring policies create", + "AuditProcedure": "**From Google Cloud Console**\n\n**Ensure the prescribed log metric is present:**\n\n1. For each project that contains cloud storage buckets, go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. In the `User-defined Metrics` section, ensure at least one metric `` is present with the filter text:\n\n```\nresource.type=\"gcs_bucket\"\nAND protoPayload.methodName=\"storage.setIamPermissions\"\n```\n\n**Ensure that the prescribed alerting policy is present:**\n\n3. Go to `Alerting` by visiting https://console.cloud.google.com/monitoring/alerting(https://console.cloud.google.com/monitoring/alerting).\n\n4. Under the `Policies` section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. 
For example, `Violates when: Any logging.googleapis.com/user/ stream` `is above a threshold of 0 for greater than 0 seconds` means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization.\n\n5. Ensure that the appropriate notifications channels have been set up.\n\n**From Google Cloud CLI**\n\n**Ensure that the prescribed log metric is present:**\n\n1. List the log metrics:\n```\ngcloud logging metrics list --format json\n```\n2. Ensure that the output contains at least one metric with the filter set to: \n```\nresource.type=gcs_bucket \nAND protoPayload.methodName=\"storage.setIamPermissions\"\n```\n\n3. Note the value of the property `metricDescriptor.type` for the identified metric, in the format `logging.googleapis.com/user/`.\n\n**Ensure the prescribed alerting policy is present:**\n\n4. List the alerting policies:\n```\ngcloud alpha monitoring policies list --format json\n```\n5. Ensure that the output contains at least one alert policy where:\n- `conditions.conditionThreshold.filter` is set to `metric.type=\\\"logging.googleapis.com/user/\\\"`\n- AND `enabled` is set to `true`", + "AdditionalInformation": "", + "References": "https://cloud.google.com/logging/docs/logs-based-metrics/:https://cloud.google.com/monitoring/custom-metrics/:https://cloud.google.com/monitoring/alerts/:https://cloud.google.com/logging/docs/reference/tools/gcloud-logging:https://cloud.google.com/storage/docs/overview:https://cloud.google.com/storage/docs/access-control/iam-roles" + } + ] + }, + { + "Id": "2.6", + "Description": "It is recommended that a metric filter and alarm be established for changes to Identity and Access Management (IAM) role creation, deletion and updating activities.", + "Checks": [ + "logging_log_metric_filter_and_alert_for_custom_role_changes_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended that a metric filter and alarm be established for changes to Identity and Access Management (IAM) role creation, deletion and updating activities.", + "RationaleStatement": "Google Cloud IAM provides predefined roles that give granular access to specific Google Cloud Platform resources and prevent unwanted access to other resources. However, to cater to organization-specific needs, Cloud IAM also provides the ability to create custom roles. Project owners and administrators with the Organization Role Administrator role or the IAM Role Administrator role can create custom roles. \nMonitoring role creation, deletion and updating activities will help in identifying any over-privileged role at early stages.", + "ImpactStatement": "Enabling of logging may result in your project being charged for the additional logs usage.", + "RemediationProcedure": "**From Console:**\n\n**Create the prescribed log metric:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics) and click \"CREATE METRIC\".\n\n1. Click the down arrow symbol on the `Filter Bar` at the rightmost corner and select `Convert to Advanced Filter`.\n\n1. Clear any text and add: \n\n```\nresource.type=\"iam_role\" \nAND (protoPayload.methodName = \"google.iam.admin.v1.CreateRole\" \nOR protoPayload.methodName=\"google.iam.admin.v1.DeleteRole\" \nOR protoPayload.methodName=\"google.iam.admin.v1.UpdateRole\")\n```\n\n1. Click `Submit Filter`. 
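Steps 1-2 of the CLI audit can be scripted; this sketch assumes `jq` is installed (an assumption, not part of the benchmark) and simply searches the metric filters for the prescribed method name:

```
# Print the name of any log-based metric watching bucket IAM changes.
gcloud logging metrics list --format=json \
  | jq -r '.[] | select(.filter | test("storage.setIamPermissions")) | .name'
```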
Display logs appear based on the filter text entered by the user.\n\n1. In the `Metric Editor` menu on the right, fill out the name field. Set `Units` to `1` (default) and `Type` to `Counter`. This ensures that the log metric counts the number of log entries matching the advanced logs query.\n\n1. Click `Create Metric`. \n\n**Create a prescribed Alert Policy:** \n\n1. Identify the new metric that was just created under the section `User-defined Metrics` at https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. Click the 3-dot icon in the rightmost column for the metric and select `Create alert from Metric`. A new page displays.\n\n3. Fill out the alert policy configuration and click `Save`. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of zero(0) for the most recent value ensures that a notification is triggered for every owner change in the project:\n```\nSet `Aggregator` to `Count`\n\nSet `Configuration`:\n\n- Condition: above\n\n- Threshold: 0\n\n- For: most recent value\n```\n\n1. Configure the desired notification channels in the section `Notifications`.\n\n1. Name the policy and click `Save`.\n\n**From Google Cloud CLI**\n\nCreate the prescribed Log Metric:\n- Use the command: gcloud logging metrics create \n\nCreate the prescribed Alert Policy: \n- Use the command: gcloud alpha monitoring policies create ", + "AuditProcedure": "**From Console:**\n\n**Ensure that the prescribed log metric is present:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. In the `User-defined Metrics` section, ensure that at least one metric `` is present with filter text:\n\n```\nresource.type=\"iam_role\" \nAND (protoPayload.methodName=\"google.iam.admin.v1.CreateRole\" \nOR protoPayload.methodName=\"google.iam.admin.v1.DeleteRole\" \nOR protoPayload.methodName=\"google.iam.admin.v1.UpdateRole\")\n```\n\n**Ensure that the prescribed alerting policy is present:**\n\n3. Go to `Alerting` by visiting https://console.cloud.google.com/monitoring/alerting(https://console.cloud.google.com/monitoring/alerting).\n\n4. Under the `Policies` section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, `Violates when: Any logging.googleapis.com/user/ stream` `is above a threshold of zero(0) for greater than zero(0) seconds` means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization.\n\n5. Ensure that the appropriate notifications channels have been set up.\n\n**From Google Cloud CLI**\n\nEnsure that the prescribed log metric is present:\n\n1. List the log metrics:\n\n```\ngcloud logging metrics list --format json\n```\n2. Ensure that the output contains at least one metric with the filter set to:\n\n```\nresource.type=\"iam_role\"\nAND (protoPayload.methodName = \"google.iam.admin.v1.CreateRole\" OR\nprotoPayload.methodName=\"google.iam.admin.v1.DeleteRole\" OR\nprotoPayload.methodName=\"google.iam.admin.v1.UpdateRole\")\n```\n\n3. Note the value of the property `metricDescriptor.type` for the identified metric, in the format `logging.googleapis.com/user/`.\n\n**Ensure that the prescribed alerting policy is present:**\n\n4. 
List the alerting policies:\n```\ngcloud alpha monitoring policies list --format json\n```\n5. Ensure that the output contains at least one alert policy where:\n- `conditions.conditionThreshold.filter` is set to `metric.type=\\\"logging.googleapis.com/user/\\\"`\n- AND `enabled` is set to `true`.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/logging/docs/logs-based-metrics/:https://cloud.google.com/monitoring/custom-metrics/:https://cloud.google.com/monitoring/alerts/:https://cloud.google.com/logging/docs/reference/tools/gcloud-logging:https://cloud.google.com/iam/docs/understanding-custom-roles" + } + ] + }, + { + "Id": "2.11", + "Description": "It is recommended that a metric filter and alarm be established for SQL instance configuration changes.", + "Checks": [ + "logging_log_metric_filter_and_alert_for_sql_instance_configuration_changes_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "It is recommended that a metric filter and alarm be established for SQL instance configuration changes.", + "RationaleStatement": "Monitoring SQL instance configuration changes may reduce the time needed to detect and correct misconfigurations done on the SQL server. \n\nBelow are a few of the configurable options which may impact the security posture of an SQL instance:\n\n- Enable auto backups and high availability: Misconfiguration may adversely impact business continuity, disaster recovery, and high availability \n\n- Authorize networks: Misconfiguration may increase exposure to untrusted networks", + "ImpactStatement": "Enabling of logging may result in your project being charged for the additional logs usage. These charges could be significant depending on the size of the organization.", + "RemediationProcedure": "**From Google Cloud Console**\n\n**Create the prescribed Log Metric:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics) and click \"CREATE METRIC\".\n\n2. Click the down arrow symbol on the `Filter Bar` at the rightmost corner and select `Convert to Advanced Filter`.\n\n3. Clear any text and add: \n\n```\nprotoPayload.methodName=\"cloudsql.instances.update\"\n```\n\n4. Click `Submit Filter`. Display logs appear based on the filter text entered by the user.\n\n5. In the `Metric Editor` menu on the right, fill out the name field. Set `Units` to `1` (default) and `Type` to `Counter`. This ensures that the log metric counts the number of log entries matching the user's advanced logs query.\n\n6. Click `Create Metric`. \n\n**Create the prescribed alert policy:** \n\n1. Identify the newly created metric under the section `User-defined Metrics` at https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. Click the 3-dot icon in the rightmost column for the new metric and select `Create alert from Metric`. A new page appears.\n\n3. Fill out the alert policy configuration and click `Save`. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the user's project:\n```\nSet `Aggregator` to `Count`\n\nSet `Configuration`:\n\n- Condition: above\n\n- Threshold: 0\n\n- For: most recent value\n```\n\n4. 
Configure the desired notification channels in the section `Notifications`.\n\n5. Name the policy and click `Save`.\n\n**From Google Cloud CLI**\n\nCreate the prescribed log metric:\n- Use the command: gcloud logging metrics create \n\nCreate the prescribed alert policy: \n- Use the command: gcloud alpha monitoring policies create\n- Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create(https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create)", + "AuditProcedure": "**From Google Cloud Console**\n\n**Ensure the prescribed log metric is present:**\n\n1. For each project that contains Cloud SQL instances, go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. In the `User-defined Metrics` section, ensure that at least one metric `` is present with the filter text:\n\n```\nprotoPayload.methodName=\"cloudsql.instances.update\"\n```\n\n**Ensure that the prescribed alerting policy is present:**\n\n3. Go to `Alerting` by visiting https://console.cloud.google.com/monitoring/alerting(https://console.cloud.google.com/monitoring/alerting).\n\n4. Under the `Policies` section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, `Violates when: Any logging.googleapis.com/user/ stream` `is above a threshold of zero(0) for greater than zero(0) seconds` means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization.\n\n5. Ensure that the appropriate notifications channels have been set up.\n\n**From Google Cloud CLI**\n\n**Ensure that the prescribed log metric is present:**\n\n1. List the log metrics:\n```\ngcloud logging metrics list --format json\n```\n2. Ensure that the output contains at least one metric with the filter set to \n```\nprotoPayload.methodName=\"cloudsql.instances.update\"\n```\n\n3. Note the value of the property `metricDescriptor.type` for the identified metric, in the format `logging.googleapis.com/user/`.\n\n**Ensure that the prescribed alerting policy is present:**\n\n4. List the alerting policies:\n```\ngcloud alpha monitoring policies list --format json\n```\n5. Ensure that the output contains at least one alert policy where:\n- `conditions.conditionThreshold.filter` is set to `metric.type=\\\"logging.googleapis.com/user/\\\"`\n- AND `enabled` is set to `true`", + "AdditionalInformation": "", + "References": "https://cloud.google.com/logging/docs/logs-based-metrics/:https://cloud.google.com/monitoring/custom-metrics/:https://cloud.google.com/monitoring/alerts/:https://cloud.google.com/logging/docs/reference/tools/gcloud-logging:https://cloud.google.com/storage/docs/overview:https://cloud.google.com/sql/docs/:https://cloud.google.com/sql/docs/mysql/:https://cloud.google.com/sql/docs/postgres/" + } + ] + }, + { + "Id": "2.9", + "Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) network changes.", + "Checks": [ + "logging_log_metric_filter_and_alert_for_vpc_network_changes_enabled" + ], + "Attributes": [ + { + "Section": "2. 
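A sketch of what `gcloud alpha monitoring policies create --policy-from-file` might consume for this control; the display names and the metric name `sql-instance-changes` are illustrative, and the threshold mirrors the console example above (count above 0 for the most recent value):

```
# Hypothetical alert-policy definition; field values are illustrative.
cat > policy.json <<'EOF'
{
  "displayName": "SQL instance configuration changes",
  "combiner": "OR",
  "conditions": [
    {
      "displayName": "Log metric threshold",
      "conditionThreshold": {
        "filter": "metric.type=\"logging.googleapis.com/user/sql-instance-changes\"",
        "comparison": "COMPARISON_GT",
        "thresholdValue": 0,
        "duration": "0s",
        "aggregations": [
          { "alignmentPeriod": "60s", "perSeriesAligner": "ALIGN_COUNT" }
        ]
      }
    }
  ]
}
EOF
gcloud alpha monitoring policies create --policy-from-file=policy.json
```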
Logging and Monitoring", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) network changes.", + "RationaleStatement": "It is possible to have more than one VPC within a project. In addition, it is also possible to create a peer connection between two VPCs enabling network traffic to route between VPCs. \n\nMonitoring changes to a VPC will help ensure VPC traffic flow is not getting impacted.", + "ImpactStatement": "Enabling of logging may result in your project being charged for the additional logs usage. These charges could be significant depending on the size of the organization.", + "RemediationProcedure": "**From Google Cloud Console**\n\n**Create the prescribed log metric:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics) and click \"CREATE METRIC\".\n\n2. Click the down arrow symbol on `Filter Bar` at the rightmost corner and select `Convert to Advanced Filter`.\n\n3. Clear any text and add: \n\n```\nresource.type=\"gce_network\" \nAND (protoPayload.methodName:\"compute.networks.insert\" \nOR protoPayload.methodName:\"compute.networks.patch\" \nOR protoPayload.methodName:\"compute.networks.delete\" \nOR protoPayload.methodName:\"compute.networks.removePeering\" \nOR protoPayload.methodName:\"compute.networks.addPeering\")\n```\n\n4. Click `Submit Filter`. Display logs appear based on the filter text entered by the user.\n\n5. In the `Metric Editor` menu on the right, fill out the name field. Set `Units` to `1` (default) and `Type` to `Counter`. This ensures that the log metric counts the number of log entries matching the user's advanced logs query.\n\n6. Click `Create Metric`. \n\n**Create the prescribed alert policy:** \n\n1. Identify the newly created metric under the section `User-defined Metrics` at https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. Click the 3-dot icon in the rightmost column for the new metric and select `Create alert from Metric`. A new page appears.\n\n3. Fill out the alert policy configuration and click `Save`. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of 0 for the most recent value will ensure that a notification is triggered for every owner change in the project:\n```\nSet `Aggregator` to `Count`\n\nSet `Configuration`:\n\n- Condition: above\n\n- Threshold: 0\n\n- For: most recent value\n```\n\n4. Configure the desired notification channels in the section `Notifications`.\n\n5. Name the policy and click `Save`.\n\n**From Google Cloud CLI**\n\nCreate the prescribed Log Metric:\n- Use the command: gcloud logging metrics create \n\nCreate the prescribed alert policy: \n- Use the command: gcloud alpha monitoring policies create", + "AuditProcedure": "**From Google Cloud Console**\n\n**Ensure the prescribed log metric is present:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. 
In the `User-defined Metrics` section, ensure at least one metric `` is present with filter text:\n\n```\nresource.type=\"gce_network\" \nAND (protoPayload.methodName:\"compute.networks.insert\" \nOR protoPayload.methodName:\"compute.networks.patch\" \nOR protoPayload.methodName:\"compute.networks.delete\" \nOR protoPayload.methodName:\"compute.networks.removePeering\" \nOR protoPayload.methodName:\"compute.networks.addPeering\")\n```\n\n**Ensure the prescribed alerting policy is present:**\n\n3. Go to `Alerting` by visiting https://console.cloud.google.com/monitoring/alerting(https://console.cloud.google.com/monitoring/alerting).\n\n4. Under the `Policies` section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, `Violates when: Any logging.googleapis.com/user/ stream` `is above a threshold of 0 for greater than 0 seconds` means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization.\n\n5. Ensure that appropriate notification channels have been set up.\n\n**From Google Cloud CLI**\n\n**Ensure the log metric is present:**\n\n1. List the log metrics:\n```\ngcloud logging metrics list --format json\n```\n2. Ensure that the output contains at least one metric with filter set to: \n```\nresource.type=\"gce_network\" \nAND (protoPayload.methodName=\"beta.compute.networks.insert\" \nOR protoPayload.methodName=\"beta.compute.networks.patch\" \nOR protoPayload.methodName=\"v1.compute.networks.delete\" \nOR protoPayload.methodName=\"v1.compute.networks.removePeering\" \nOR protoPayload.methodName=\"v1.compute.networks.addPeering\")\n```\n\n3. Note the value of the property `metricDescriptor.type` for the identified metric, in the format `logging.googleapis.com/user/`.\n\n**Ensure the prescribed alerting policy is present:**\n\n4. List the alerting policies:\n```\ngcloud alpha monitoring policies list --format json\n```\n5. Ensure that the output contains at least one alert policy where:\n- `conditions.conditionThreshold.filter` is set to `metric.type=\\\"logging.googleapis.com/user/\\\"`\n- AND `enabled` is set to `true`", + "AdditionalInformation": "", + "References": "https://cloud.google.com/logging/docs/logs-based-metrics/:https://cloud.google.com/monitoring/custom-metrics/:https://cloud.google.com/monitoring/alerts/:https://cloud.google.com/logging/docs/reference/tools/gcloud-logging:https://cloud.google.com/vpc/docs/overview" + } + ] + }, + { + "Id": "2.14", + "Description": "GCP Access Transparency provides audit logs for all actions that Google personnel take in your Google Cloud resources.", + "Checks": [], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "GCP Access Transparency provides audit logs for all actions that Google personnel take in your Google Cloud resources.", + "RationaleStatement": "Controlling access to your information is one of the foundations of information security. Given that Google Employees do have access to your organizations' projects for support reasons, you should have logging in place to view who, when, and why your information is being accessed.", + "ImpactStatement": "To use Access Transparency your organization will need to have at least one of the following support levels: Premium, Enterprise, Platinum, or Gold. 
There will be subscription costs associated with support, as well as increased storage costs for storing the logs. You will also not be able to turn Access Transparency off yourself, and you will need to submit a service request to Google Cloud Support.", + "RemediationProcedure": "**From Google Cloud Console**\n\n**Add privileges to enable Access Transparency**\n\n1. From the Google Cloud Home, within the project you wish to check, click on the Navigation hamburger menu in the top left. Hover over 'IAM and Admin'. Select `IAM` at the top of the column that opens. \n\n2. Click the blue button that says `+add` at the top of the screen.\n\n3. In the `principals` field, select a user or group by typing in their associated email address.\n\n4. Click on the `role` field to expand it. In the filter field enter `Access Transparency Admin` and select it.\n\n5. Click `save`.\n\n**Verify that the Google Cloud project is associated with a billing account**\n\n1. From the Google Cloud Home, click on the Navigation hamburger menu in the top left. Select `Billing`.\n\n2. If you see `This project is not associated with a billing account` you will need to enter billing information or switch to a project with a billing account.\n\n**Enable Access Transparency**\n\n1. From the Google Cloud Home, click on the Navigation hamburger menu in the top left. Hover over the IAM & Admin Menu. Select `settings` in the middle of the column that opens.\n\n2. Click the blue button labeled `Enable Access Transparency for Organization`.", + "AuditProcedure": "**From Google Cloud Console**\n\n**Determine if Access Transparency is Enabled**\n\n1. From the Google Cloud Home, click on the Navigation hamburger menu in the top left. Hover over the IAM & Admin Menu. Select `settings` in the middle of the column that opens.\n\n2. The status will be under the heading `Access Transparency`. The status should be `Enabled`.", + "AdditionalInformation": "To enable Access Transparency for your Google Cloud organization, your Google Cloud organization must have one of the following customer support levels: Premium, Enterprise, Platinum, or Gold.", + "References": "https://cloud.google.com/cloud-provider-access-management/access-transparency/docs/overview:https://cloud.google.com/cloud-provider-access-management/access-transparency/docs/enable:https://cloud.google.com/cloud-provider-access-management/access-transparency/docs/reading-logs:https://cloud.google.com/cloud-provider-access-management/access-transparency/docs/reading-logs#justification_reason_codes:https://cloud.google.com/cloud-provider-access-management/access-transparency/docs/supported-services" + } + ] + }, + { + "Id": "2.7", + "Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) Network Firewall rule changes.", + "Checks": [ + "logging_log_metric_filter_and_alert_for_vpc_firewall_rule_changes_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) Network Firewall rule changes.", + "RationaleStatement": "Monitoring for Create or Update Firewall rule events gives insight into network access changes and may reduce the time it takes to detect suspicious activity.", + "ImpactStatement": "Enabling of logging may result in your project being charged for the additional logs usage. 
These charges could be significant depending on the size of the organization.", + "RemediationProcedure": "**From Google Cloud Console**\n\n**Create the prescribed log metric:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics) and click \"CREATE METRIC\".\n\n2. Click the down arrow symbol on the `Filter Bar` at the rightmost corner and select `Convert to Advanced Filter`.\n\n3. Clear any text and add: \n\n```\nresource.type=\"gce_firewall_rule\" \nAND (protoPayload.methodName:\"compute.firewalls.patch\" \nOR protoPayload.methodName:\"compute.firewalls.insert\"\nOR protoPayload.methodName:\"compute.firewalls.delete\")\n```\n\n4. Click `Submit Filter`. Display logs appear based on the filter text entered by the user.\n\n5. In the `Metric Editor` menu on the right, fill out the name field. Set `Units` to `1` (default) and `Type` to `Counter`. This ensures that the log metric counts the number of log entries matching the advanced logs query.\n\n6. Click `Create Metric`. \n\n**Create the prescribed Alert Policy:** \n\n1. Identify the newly created metric under the section `User-defined Metrics` at https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. Click the 3-dot icon in the rightmost column for the new metric and select `Create alert from Metric`. A new page displays.\n\n3. Fill out the alert policy configuration and click `Save`. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of zero(0) for the most recent value ensures that a notification is triggered for every owner change in the project:\n```\nSet `Aggregator` to `Count`\n\nSet `Configuration`:\n\n- Condition: above\n\n- Threshold: 0\n\n- For: most recent value\n```\n\n4. Configure the desired notifications channels in the section `Notifications`.\n\n5. Name the policy and click `Save`.\n\n**From Google Cloud CLI**\n\nCreate the prescribed Log Metric\n- Use the command: gcloud logging metrics create \n\nCreate the prescribed alert policy: \n- Use the command: gcloud alpha monitoring policies create", + "AuditProcedure": "**From Google Cloud Console**\n\n**Ensure that the prescribed log metric is present:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. In the `User-defined Metrics` section, ensure at least one metric `` is present with this filter text:\n\n```\nresource.type=\"gce_firewall_rule\" \nAND (protoPayload.methodName:\"compute.firewalls.patch\" \nOR protoPayload.methodName:\"compute.firewalls.insert\"\nOR protoPayload.methodName:\"compute.firewalls.delete\")\n```\n\n**Ensure that the prescribed alerting policy is present:**\n\n3. Go to `Alerting` by visiting https://console.cloud.google.com/monitoring/alerting(https://console.cloud.google.com/monitoring/alerting).\n\n4. Under the `Policies` section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, `Violates when: Any logging.googleapis.com/user/ stream` `is above a threshold of zero(0) for greater than zero(0) seconds` means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization.\n\n5. 
Ensure that appropriate notification channels have been set up.\n\n**From Google Cloud CLI**\n\n**Ensure that the prescribed log metric is present:**\n\n1. List the log metrics:\n```\ngcloud logging metrics list --format json\n```\n2. Ensure that the output contains at least one metric with the filter set to: \n\n```\nresource.type=\"gce_firewall_rule\" \nAND (protoPayload.methodName:\"compute.firewalls.patch\" \nOR protoPayload.methodName:\"compute.firewalls.insert\"\nOR protoPayload.methodName:\"compute.firewalls.delete\")\n```\n\n3. Note the value of the property `metricDescriptor.type` for the identified metric, in the format `logging.googleapis.com/user/`.\n\n**Ensure that the prescribed alerting policy is present:**\n\n4. List the alerting policies:\n```\ngcloud alpha monitoring policies list --format json\n```\n5. Ensure that the output contains at least one alert policy where:\n- `conditions.conditionThreshold.filter` is set to `metric.type=\\\"logging.googleapis.com/user/\\\"`\n- AND `enabled` is set to `true`", + "AdditionalInformation": "", + "References": "https://cloud.google.com/logging/docs/logs-based-metrics/:https://cloud.google.com/monitoring/custom-metrics/:https://cloud.google.com/monitoring/alerts/:https://cloud.google.com/logging/docs/reference/tools/gcloud-logging:https://cloud.google.com/vpc/docs/firewalls" + } + ] + }, + { + "Id": "2.8", + "Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) network route changes.", + "Checks": [ + "logging_log_metric_filter_and_alert_for_vpc_network_route_changes_enabled" + ], + "Attributes": [ + { + "Section": "2. Logging and Monitoring", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "It is recommended that a metric filter and alarm be established for Virtual Private Cloud (VPC) network route changes.", + "RationaleStatement": "Google Cloud Platform (GCP) routes define the paths network traffic takes from a VM instance to another destination. The other destination can be inside the organization VPC network (such as another VM) or outside of it. Every route consists of a destination and a next hop. Traffic whose destination IP is within the destination range is sent to the next hop for delivery. \n\nMonitoring changes to route tables will help ensure that all VPC traffic flows through an expected path.", + "ImpactStatement": "Enabling of logging may result in your project being charged for the additional logs usage. These charges could be significant depending on the size of the organization.", + "RemediationProcedure": "**From Google Cloud Console**\n\n**Create the prescribed Log Metric:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics) and click \"CREATE METRIC\".\n\n2. Click the down arrow symbol on the `Filter Bar` at the rightmost corner and select `Convert to Advanced Filter`.\n\n3. Clear any text and add: \n\n```\nresource.type=\"gce_route\" \nAND (protoPayload.methodName:\"compute.routes.delete\" \nOR protoPayload.methodName:\"compute.routes.insert\")\n```\n\n4. Click `Submit Filter`. Display logs appear based on the filter text entered by the user.\n\n5. In the `Metric Editor` menu on the right, fill out the name field. Set `Units` to `1` (default) and `Type` to `Counter`. This ensures that the log metric counts the number of log entries matching the user's advanced logs query.\n\n6. Click `Create Metric`. 
\n\n**Create the prescribed alert policy:** \n\n1. Identify the newly created metric under the section `User-defined Metrics` at https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. Click the 3-dot icon in the rightmost column for the new metric and select `Create alert from Metric`. A new page displays.\n\n3. Fill out the alert policy configuration and click `Save`. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of zero(0) for the most recent value ensures that a notification is triggered for every owner change in the project:\n```\nSet `Aggregator` to `Count`\n\nSet `Configuration`:\n\n- Condition: above\n\n- Threshold: 0\n\n- For: most recent value\n```\n\n4. Configure the desired notification channels in the section `Notifications`.\n\n5. Name the policy and click `Save`.\n\n**From Google Cloud CLI**\n\nCreate the prescribed Log Metric:\n- Use the command: gcloud logging metrics create \n\nCreate the prescribed alert policy: \n- Use the command: gcloud alpha monitoring policies create", + "AuditProcedure": "**From Google Cloud Console**\n\n**Ensure that the prescribed Log metric is present:**\n\n1. Go to `Logging/Logs-based Metrics` by visiting https://console.cloud.google.com/logs/metrics(https://console.cloud.google.com/logs/metrics).\n\n2. In the `User-defined Metrics` section, ensure that at least one metric `` is present with the filter text:\n\n```\nresource.type=\"gce_route\" \nAND (protoPayload.methodName:\"compute.routes.delete\" \nOR protoPayload.methodName:\"compute.routes.insert\")\n```\n\n**Ensure the prescribed alerting policy is present:**\n\n3. Go to `Alerting` by visiting: https://console.cloud.google.com/monitoring/alerting(https://console.cloud.google.com/monitoring/alerting).\n\n4. Under the `Policies` section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, `Violates when: Any logging.googleapis.com/user/ stream` `is above a threshold of 0 for greater than zero(0) seconds` means that the alert will trigger for any new owner change. Verify that the chosen alert thresholds make sense for the user's organization.\n\n5. Ensure that the appropriate notification channels have been set up.\n\n**From Google Cloud CLI**\n\n**Ensure the prescribed log metric is present:**\n\n1. List the log metrics:\n```\ngcloud logging metrics list --format json\n```\n2. Ensure that the output contains at least one metric with the filter set to: \n\n```\nresource.type=\"gce_route\" \nAND (protoPayload.methodName:\"compute.routes.delete\" \nOR protoPayload.methodName:\"compute.routes.insert\")\n```\n\n3. Note the value of the property `metricDescriptor.type` for the identified metric, in the format `logging.googleapis.com/user/`.\n\n**Ensure that the prescribed alerting policy is present:**\n\n4. List the alerting policies:\n```\ngcloud alpha monitoring policies list --format json\n```\n5. 
Ensure that the output contains at least one alert policy where:\n- `conditions.conditionThreshold.filter` is set to `metric.type=\\\"logging.googleapis.com/user/\\\"`\n- AND `enabled` is set to `true`", + "AdditionalInformation": "", + "References": "https://cloud.google.com/logging/docs/logs-based-metrics/:https://cloud.google.com/monitoring/custom-metrics/:https://cloud.google.com/monitoring/alerts/:https://cloud.google.com/logging/docs/reference/tools/gcloud-logging:https://cloud.google.com/storage/docs/access-control/iam:https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create:https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create" + } + ] + }, + { + "Id": "3.2", + "Description": "In order to prevent use of legacy networks, a project should not have a legacy network configured. As of now, Legacy Networks are gradually being phased out, and you can no longer create projects with them. This recommendation is to check older projects to ensure that they are not using Legacy Networks.", + "Checks": [ + "compute_network_not_legacy" + ], + "Attributes": [ + { + "Section": "3. Networking", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "In order to prevent use of legacy networks, a project should not have a legacy network configured. As of now, Legacy Networks are gradually being phased out, and you can no longer create projects with them. This recommendation is to check older projects to ensure that they are not using Legacy Networks.", + "RationaleStatement": "Legacy networks have a single network IPv4 prefix range and a single gateway IP address for the whole network. The network is global in scope and spans all cloud regions. Subnetworks cannot be created in a legacy network and are unable to switch from legacy to auto or custom subnet networks. Legacy networks can have an impact on high network traffic projects and are subject to a single point of contention or failure.", + "ImpactStatement": "None.", + "RemediationProcedure": "**From Google Cloud CLI**\n\nFor each Google Cloud Platform project,\n\n1. Follow the documentation and create a non-legacy network suitable for the organization's requirements.\n\n2. Follow the documentation and delete the networks in the `legacy` mode.", + "AuditProcedure": "**From Google Cloud CLI**\n\nFor each Google Cloud Platform project,\n\n1. Set the project name in the Google Cloud Shell:\n```\n\ngcloud config set project \n```\n2. List the networks configured in that project:\n```\n\ngcloud compute networks list \n```\nNone of the listed networks should be in the `legacy` mode.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/vpc/docs/using-legacy#creating_a_legacy_network:https://cloud.google.com/vpc/docs/using-legacy#deleting_a_legacy_network" + } + ] + }, + { + "Id": "3.3", + "Description": "Cloud Domain Name System (DNS) is a fast, reliable and cost-effective domain name system that powers millions of domains on the internet. Domain Name System Security Extensions (DNSSEC) in Cloud DNS enables domain owners to take easy steps to protect their domains against DNS hijacking and man-in-the-middle and other attacks.", + "Checks": [ + "dns_dnssec_disabled" + ], + "Attributes": [ + { + "Section": "3. Networking", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Cloud Domain Name System (DNS) is a fast, reliable and cost-effective domain name system that powers millions of domains on the internet. 
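The legacy-network audit can be scripted; this sketch relies on the assumption that a legacy network is recognizable by a populated top-level `IPv4Range` field, which auto- and custom-mode networks do not carry:

```
# Flag any network in the current project that is still in legacy mode.
for net in $(gcloud compute networks list --format="value(name)"); do
  range=$(gcloud compute networks describe "$net" --format="value(IPv4Range)")
  [ -n "$range" ] && echo "LEGACY network found: $net ($range)"
done
```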
Domain Name System Security Extensions (DNSSEC) in Cloud DNS enables domain owners to take easy steps to protect their domains against DNS hijacking and man-in-the-middle and other attacks.", + "RationaleStatement": "Domain Name System Security Extensions (DNSSEC) adds security to the DNS protocol by enabling DNS responses to be validated. Having a trustworthy DNS that translates a domain name like www.example.com into its associated IP address is an increasingly important building block of today’s web-based applications. Attackers can hijack this process of domain/IP lookup and redirect users to a malicious site through DNS hijacking and man-in-the-middle attacks. DNSSEC helps mitigate the risk of such attacks by cryptographically signing DNS records. As a result, it prevents attackers from issuing fake DNS responses that may misdirect browsers to nefarious websites.", + "ImpactStatement": "", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `Cloud DNS` by visiting https://console.cloud.google.com/net-services/dns/zones(https://console.cloud.google.com/net-services/dns/zones).\n2. For each zone of `Type` `Public`, set `DNSSEC` to `On`.\n\n**From Google Cloud CLI**\n\nUse the below command to enable `DNSSEC` for Cloud DNS Zone Name.\n```\ngcloud dns managed-zones update ZONE_NAME --dnssec-state on\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `Cloud DNS` by visiting https://console.cloud.google.com/net-services/dns/zones(https://console.cloud.google.com/net-services/dns/zones).\n2. For each zone of `Type` `Public`, ensure that `DNSSEC` is set to `On`.\n\n**From Google Cloud CLI**\n\n1. List all the Managed Zones in a project:\n```\ngcloud dns managed-zones list\n```\n\n2. For each zone of `VISIBILITY` `public`, get its metadata: \n\n```\ngcloud dns managed-zones describe ZONE_NAME\n```\n\n3. Ensure that `dnssecConfig.state` property is `on`.", + "AdditionalInformation": "", + "References": "https://cloudplatform.googleblog.com/2017/11/DNSSEC-now-available-in-Cloud-DNS.html:https://cloud.google.com/dns/dnssec-config#enabling:https://cloud.google.com/dns/dnssec" + } + ] + }, + { + "Id": "3.7", + "Description": "GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow users to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances.\n\nFirewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the Internet to a VPC or VM instance using `RDP` on `Port 3389` can be avoided.", + "Checks": [ + "compute_firewall_rdp_access_from_the_internet_allowed" + ], + "Attributes": [ + { + "Section": "3. Networking", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. 
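Steps 1-3 of the DNSSEC CLI audit can be combined into one loop; a minimal sketch, assuming the standard gcloud filter and format syntax:

```
# Report the DNSSEC state of every public managed zone in the current project.
for zone in $(gcloud dns managed-zones list --filter="visibility=public" --format="value(name)"); do
  state=$(gcloud dns managed-zones describe "$zone" --format="value(dnssecConfig.state)")
  echo "$zone: ${state:-off}"
done
```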
Its conditions allow users to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances.\n\nFirewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the Internet to a VPC or VM instance using `RDP` on `Port 3389` can be avoided.", + "RationaleStatement": "GCP `Firewall Rules` within a `VPC Network` apply to outgoing (egress) traffic from instances and incoming (ingress) traffic to instances in the network. Egress and ingress traffic flows are controlled even if the traffic stays within the network (for example, instance-to-instance communication).\nFor an instance to have outgoing Internet access, the network must have a valid Internet gateway route or custom route whose destination IP is specified. This route simply defines the path to the Internet, to avoid the most general `(0.0.0.0/0)` destination `IP Range` specified from the Internet through `RDP` with the default `Port 3389`. Generic access from the Internet to a specific IP Range should be restricted.", + "ImpactStatement": "All Remote Desktop Protocol (RDP) connections from outside of the network to the concerned VPC(s) will be blocked. There could be a business need where remote desktop access is required from outside of the network to access resources associated with the VPC. In that case, specific source IP(s) should be mentioned in firewall rules to white-list access to the RDP port for the concerned VPC(s).", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `VPC Network`.\n2. Go to the `Firewall Rules`.\n3. Click the `Firewall Rule` to be modified.\n4. Click `Edit`.\n5. Modify `Source IP ranges` to specific `IP`.\n6. Click `Save`.\n\n**From Google Cloud CLI**\n\n1. Update the RDP firewall rule with the new `SOURCE_RANGE` using the below command:\n\n gcloud compute firewall-rules update FirewallName --allow=PROTOCOL:PORT-PORT,... --source-ranges=CIDR_RANGE,...", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `VPC network`.\n2. Go to the `Firewall Rules`.\n3. Ensure `Port` is not equal to `3389` and `Action` is not `Allow`.\n4. Ensure `IP Ranges` is not equal to `0.0.0.0/0` under `Source filters`.\n\n**From Google Cloud CLI**\n\n gcloud compute firewall-rules list --format=table'(name,direction,sourceRanges,allowed.ports)'\n\nEnsure that there is no rule matching the below criteria:\n- `SOURCE_RANGES` is `0.0.0.0/0`\n- AND `DIRECTION` is `INGRESS`\n- AND IPProtocol is `TCP` or `ALL`\n- AND `PORTS` is set to `3389` or `range containing 3389` or `Null (not set)`\n\nNote: \n- When ALL TCP ports are allowed in a rule, PORT does not have any value set (`NULL`)\n- When ALL Protocols are allowed in a rule, PORT does not have any value set (`NULL`)", + "AdditionalInformation": "Currently, GCP VPC only supports IPV4; however, Google is already working on adding IPV6 support for VPC. 
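The `gcloud compute firewall-rules list` audit above can be narrowed with a server-side filter; this sketch assumes the standard gcloud filter syntax for list-membership matching and surfaces only Internet-open ingress rules for manual review of the flagged ports:

```
# Ingress rules whose source ranges include the whole Internet.
gcloud compute firewall-rules list \
  --filter='direction=INGRESS AND sourceRanges:"0.0.0.0/0"' \
  --format="table(name,network,sourceRanges.list(),allowed)"
```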
In that case along with source IP range `0.0.0.0/0`, the rule should be checked for the IPv6 equivalent `::/0` as well.", + "References": "https://cloud.google.com/vpc/docs/firewalls#blockedtraffic:https://cloud.google.com/blog/products/identity-security/cloud-iap-enables-context-aware-access-to-vms-via-ssh-and-rdp-without-bastion-hosts" + } + ] + }, + { + "Id": "3.4", + "Description": "NOTE: Currently, the SHA1 algorithm has been removed from general use by Google, and, if being used, needs to be whitelisted on a project basis by Google and will also, therefore, require a Google Cloud support contract.\n\nDNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms. The algorithm used for key signing should be a recommended one and it should be strong.", + "Checks": [ + "dns_rsasha1_in_use_to_key_sign_in_dnssec" + ], + "Attributes": [ + { + "Section": "3. Networking", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "NOTE: Currently, the SHA1 algorithm has been removed from general use by Google, and, if being used, needs to be whitelisted on a project basis by Google and will also, therefore, require a Google Cloud support contract.\n\nDNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms. The algorithm used for key signing should be a recommended one and it should be strong.", + "RationaleStatement": "Domain Name System Security Extensions (DNSSEC) algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms.\n\nThe algorithm used for key signing should be a recommended one and it should be strong. When enabling DNSSEC for a managed zone, or creating a managed zone with DNSSEC, the user can select the DNSSEC signing algorithms and the denial-of-existence type. Changing the DNSSEC settings is only effective for a managed zone if DNSSEC is not already enabled. If there is a need to change the settings for a managed zone where it has been enabled, turn DNSSEC off and then re-enable it with different settings.", + "ImpactStatement": "", + "RemediationProcedure": "**From Google Cloud CLI**\n\n1. If it is necessary to change the settings for a managed zone where it has been enabled, DNSSEC must be turned off and re-enabled with different settings. To turn off DNSSEC, run the following command:\n\n```\ngcloud dns managed-zones update ZONE_NAME --dnssec-state off\n```\n\n2. 
To update key-signing for a reported managed DNS Zone, run the following command:\n\n```\ngcloud dns managed-zones update ZONE_NAME --dnssec-state on --ksk-algorithm KSK_ALGORITHM --ksk-key-length KSK_KEY_LENGTH --zsk-algorithm ZSK_ALGORITHM --zsk-key-length ZSK_KEY_LENGTH --denial-of-existence DENIAL_OF_EXISTENCE\n```\n\nSupported algorithm options and key lengths are as follows.\n\n Algorithm KSK Length ZSK Length\n --------- ---------- ----------\n RSASHA1 1024,2048 1024,2048\n RSASHA256 1024,2048 1024,2048\n RSASHA512 1024,2048 1024,2048\n ECDSAP256SHA256 256 256\n ECDSAP384SHA384 384 384", + "AuditProcedure": "**From Google Cloud CLI**\n\nEnsure the property algorithm for keyType keySigning is not using `RSASHA1`.\n\n gcloud dns managed-zones describe ZONENAME --format=\"json(dnsName,dnssecConfig.state,dnssecConfig.defaultKeySpecs)\"", + "AdditionalInformation": "1. RSASHA1 key-signing support may be required for compatibility reasons.\n2. Remediation CLI works well with gcloud-cli version 221.0.0 and later.", + "References": "https://cloud.google.com/dns/dnssec-advanced#advanced_signing_options" + } + ] + }, + { + "Id": "3.5", + "Description": "NOTE: Currently, the SHA1 algorithm has been removed from general use by Google, and, if being used, needs to be whitelisted on a project basis by Google and will also, therefore, require a Google Cloud support contract.\n\nDNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms. The algorithm used for key signing should be a recommended one and it should be strong.", + "Checks": [ + "dns_rsasha1_in_use_to_zone_sign_in_dnssec" + ], + "Attributes": [ + { + "Section": "3. Networking", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "NOTE: Currently, the SHA1 algorithm has been removed from general use by Google, and, if being used, needs to be whitelisted on a project basis by Google and will also, therefore, require a Google Cloud support contract.\n\nDNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms. The algorithm used for key signing should be a recommended one and it should be strong.", + "RationaleStatement": "DNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms.\n\nThe algorithm used for key signing should be a recommended one and it should be strong. When enabling DNSSEC for a managed zone, or creating a managed zone with DNSSEC, the DNSSEC signing algorithms and the denial-of-existence type can be selected. Changing the DNSSEC settings is only effective for a managed zone if DNSSEC is not already enabled. If the need exists to change the settings for a managed zone where it has been enabled, turn DNSSEC off and then re-enable it with different settings.", + "ImpactStatement": "", + "RemediationProcedure": "**From Google Cloud CLI**\n\n1. If the need exists to change the settings for a managed zone where it has been enabled, DNSSEC must be turned off and then re-enabled with different settings. To turn off DNSSEC, run following command:\n```\ngcloud dns managed-zones update ZONE_NAME --dnssec-state off\n```\n\n2. 
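Combined with `jq` (an assumption, not part of the benchmark), the key-signing check reduces to a one-liner; `ZONE_NAME` is a placeholder:

```
# Print the key-signing algorithm for a zone; RSASHA1 indicates a finding.
gcloud dns managed-zones describe ZONE_NAME \
  --format="json(dnssecConfig.defaultKeySpecs)" \
  | jq -r '.dnssecConfig.defaultKeySpecs[] | select(.keyType=="keySigning") | .algorithm'
```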
To update zone-signing for a reported managed DNS Zone, run the following command:\n```\ngcloud dns managed-zones update ZONE_NAME --dnssec-state on --ksk-algorithm KSK_ALGORITHM --ksk-key-length KSK_KEY_LENGTH --zsk-algorithm ZSK_ALGORITHM --zsk-key-length ZSK_KEY_LENGTH --denial-of-existence DENIAL_OF_EXISTENCE\n```\n\nSupported algorithm options and key lengths are as follows.\n\n Algorithm KSK Length ZSK Length\n --------- ---------- ----------\n RSASHA1 1024,2048 1024,2048\n RSASHA256 1024,2048 1024,2048\n RSASHA512 1024,2048 1024,2048\n ECDSAP256SHA256 256 256\n ECDSAP384SHA384 384 384", + "AuditProcedure": "**From Google Cloud CLI**\n\nEnsure the property algorithm for keyType zoneSigning is not using `RSASHA1`.\n\n```\ngcloud dns managed-zones describe ZONE_NAME --format=\"json(dnsName,dnssecConfig.state,dnssecConfig.defaultKeySpecs)\"\n```", + "AdditionalInformation": "1. RSASHA1 zone-signing support may be required for compatibility reasons.\n2. The remediation CLI works well with gcloud-cli version 221.0.0 and later.", + "References": "https://cloud.google.com/dns/dnssec-advanced#advanced_signing_options" + } + ] + }, + { + "Id": "3.6", + "Description": "GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow the user to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances.\n\nFirewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, only an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the internet to VPC or VM instance using `SSH` on `Port 22` can be avoided.", + "Checks": [ + "compute_firewall_ssh_access_from_the_internet_allowed" + ], + "Attributes": [ + { + "Section": "3. Networking", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "GCP `Firewall Rules` are specific to a `VPC Network`. Each rule either `allows` or `denies` traffic when its conditions are met. Its conditions allow the user to specify the type of traffic, such as ports and protocols, and the source or destination of the traffic, including IP addresses, subnets, and instances.\n\nFirewall rules are defined at the VPC network level and are specific to the network in which they are defined. The rules themselves cannot be shared among networks. Firewall rules only support IPv4 traffic. When specifying a source for an ingress rule or a destination for an egress rule by address, only an `IPv4` address or `IPv4 block in CIDR` notation can be used. Generic `(0.0.0.0/0)` incoming traffic from the internet to VPC or VM instance using `SSH` on `Port 22` can be avoided.", + "RationaleStatement": "GCP `Firewall Rules` within a `VPC Network` apply to outgoing (egress) traffic from instances and incoming (ingress) traffic to instances in the network. Egress and ingress traffic flows are controlled even if the traffic stays within the network (for example, instance-to-instance communication).\nFor an instance to have outgoing Internet access, the network must have a valid Internet gateway route or custom route whose destination IP is specified. 
This route simply defines the path to the Internet, to avoid the most general `(0.0.0.0/0)` destination `IP Range` specified from the Internet through `SSH` with the default `Port 22`. Generic access from the Internet to a specific IP Range needs to be restricted.", + "ImpactStatement": "All Secure Shell (SSH) connections from outside of the network to the concerned VPC(s) will be blocked. There could be a business need where SSH access is required from outside of the network to access resources associated with the VPC. In that case, specific source IP(s) should be mentioned in firewall rules to white-list access to SSH port for the concerned VPC(s).", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `VPC Network`.\n2. Go to the `Firewall Rules`.\n3. Click the `Firewall Rule` you want to modify.\n4. Click `Edit`.\n5. Modify `Source IP ranges` to specific `IP`.\n6. Click `Save`.\n\n**From Google Cloud CLI**\n\n1.Update the Firewall rule with the new `SOURCE_RANGE` from the below command:\n\n gcloud compute firewall-rules update FirewallName --allow=PROTOCOL:PORT-PORT,... --source-ranges=CIDR_RANGE,...", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `VPC network`.\n2. Go to the `Firewall Rules`.\n3. Ensure that `Port` is not equal to `22` and `Action` is not set to `Allow`.\n4. Ensure `IP Ranges` is not equal to `0.0.0.0/0` under `Source filters`.\n\n**From Google Cloud CLI**\n\n gcloud compute firewall-rules list --format=table'(name,direction,sourceRanges,allowed)'\n\nEnsure that there is no rule matching the below criteria:\n- `SOURCE_RANGES` is `0.0.0.0/0`\n- AND `DIRECTION` is `INGRESS`\n- AND IPProtocol is `tcp` or `ALL`\n- AND `PORTS` is set to `22` or `range containing 22` or `Null (not set)`\n\nNote: \n- When ALL TCP ports are allowed in a rule, PORT does not have any value set (`NULL`)\n- When ALL Protocols are allowed in a rule, PORT does not have any value set (`NULL`)", + "AdditionalInformation": "Currently, GCP VPC only supports IPV4; however, Google is already working on adding IPV6 support for VPC. In that case along with source IP range `0.0.0.0`, the rule should be checked for IPv6 equivalent `::0` as well.", + "References": "https://cloud.google.com/vpc/docs/firewalls#blockedtraffic:https://cloud.google.com/blog/products/identity-security/cloud-iap-enables-context-aware-access-to-vms-via-ssh-and-rdp-without-bastion-hosts" + } + ] + }, + { + "Id": "3.1", + "Description": "To prevent use of `default` network, a project should not have a `default` network.", + "Checks": [ + "compute_network_default_in_use" + ], + "Attributes": [ + { + "Section": "3. 
Networking", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "To prevent use of `default` network, a project should not have a `default` network.", + "RationaleStatement": "The `default` network has a preconfigured network configuration and automatically generates the following insecure firewall rules: \n\n- default-allow-internal: Allows ingress connections for all protocols and ports among instances in the network.\n- default-allow-ssh: Allows ingress connections on TCP port 22(SSH) from any source to any instance in the network.\n- default-allow-rdp: Allows ingress connections on TCP port 3389(RDP) from any source to any instance in the network.\n- default-allow-icmp: Allows ingress ICMP traffic from any source to any instance in the network.\n\nThese automatically created firewall rules do not get audit logged and cannot be configured to enable firewall rule logging. \n\nFurthermore, the default network is an auto mode network, which means that its subnets use the same predefined range of IP addresses, and as a result, it's not possible to use Cloud VPN or VPC Network Peering with the default network. \n\nBased on organization security and networking requirements, the organization should create a new network and delete the `default` network.", + "ImpactStatement": "When an organization deletes the default network, it may need to migrate or service onto a new network.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the `VPC networks` page by visiting: https://console.cloud.google.com/networking/networks/list(https://console.cloud.google.com/networking/networks/list).\n\n2. Click the network named `default`.\n\n2. On the network detail page, click `EDIT`.\n\n3. Click `DELETE VPC NETWORK`.\n\n4. If needed, create a new network to replace the default network.\n\n**From Google Cloud CLI**\n\nFor each Google Cloud Platform project,\n\n1. Delete the default network:\n```\ngcloud compute networks delete default\n```\n\n2. If needed, create a new network to replace it:\n```\ngcloud compute networks create NETWORK_NAME\n```\n\n**Prevention:**\n\nThe user can prevent the default network and its insecure default firewall rules from being created by setting up an Organization Policy to `Skip default network creation` at https://console.cloud.google.com/iam-admin/orgpolicies/compute-skipDefaultNetworkCreation(https://console.cloud.google.com/iam-admin/orgpolicies/compute-skipDefaultNetworkCreation).", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the `VPC networks` page by visiting: https://console.cloud.google.com/networking/networks/list(https://console.cloud.google.com/networking/networks/list).\n\n2. Ensure that a network with the name `default` is not present.\n\n**From Google Cloud CLI**\n\n1. Set the project name in the Google Cloud Shell:\n```\n\ngcloud config set project PROJECT_ID \n```\n2. 
List the networks configured in that project:\n```\ngcloud compute networks list \n```\nIt should not list `default` as one of the available networks in that project.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/compute/docs/networking#firewall_rules:https://cloud.google.com/compute/docs/reference/latest/networks/insert:https://cloud.google.com/compute/docs/reference/latest/networks/delete:https://cloud.google.com/vpc/docs/firewall-rules-logging:https://cloud.google.com/vpc/docs/vpc#default-network:https://cloud.google.com/sdk/gcloud/reference/compute/networks/delete" + } + ] + }, + { + "Id": "3.8", + "Description": "Flow Logs is a feature that enables users to capture information about the IP traffic going to and from network interfaces in the organization's VPC Subnets. Once a flow log is created, the user can view and retrieve its data in Stackdriver Logging. It is recommended that Flow Logs be enabled for every business-critical VPC subnet.", + "Checks": [ + "compute_subnet_flow_logs_enabled" + ], + "Attributes": [ + { + "Section": "3. Networking", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "Flow Logs is a feature that enables users to capture information about the IP traffic going to and from network interfaces in the organization's VPC Subnets. Once a flow log is created, the user can view and retrieve its data in Stackdriver Logging. It is recommended that Flow Logs be enabled for every business-critical VPC subnet.", + "RationaleStatement": "VPC networks and subnetworks not reserved for internal HTTP(S) load balancing provide logically isolated and secure network partitions where GCP resources can be launched. When Flow Logs are enabled for a subnet, VMs within that subnet start reporting on all Transmission Control Protocol (TCP) and User Datagram Protocol (UDP) flows.\nEach VM samples the TCP and UDP flows it sees, inbound and outbound, whether the flow is to or from another VM, a host in the on-premises datacenter, a Google service, or a host on the Internet. If two GCP VMs are communicating, and both are in subnets that have VPC Flow Logs enabled, both VMs report the flows.\n\nFlow Logs supports the following use cases:\n\n- Network monitoring\n- Understanding network usage and optimizing network traffic expenses\n- Network forensics\n- Real-time security analysis\n\nFlow Logs provide visibility into network traffic for each VM inside the subnet and can be used to detect anomalous traffic or provide insight during security workflows.\n\nThe Flow Logs must be configured such that all network traffic is logged, the interval of logging is granular to provide detailed information on the connections, no logs are filtered, and metadata to facilitate investigations are included.\n\n**Note**: Subnets reserved for use by internal HTTP(S) load balancers do not support VPC flow logs.", + "ImpactStatement": "Standard pricing for Stackdriver Logging, BigQuery, or Cloud Pub/Sub applies. VPC Flow Logs generation will be charged starting in GA as described in reference: https://cloud.google.com/vpc/", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the VPC network GCP Console visiting `https://console.cloud.google.com/networking/networks/list` \n\n2. Click the name of a subnet, The `Subnet details` page displays.\n\n3. Click the `EDIT` button.\n\n4. Set `Flow Logs` to `On`.\n\n5. Expand the `Configure Logs` section.\n\n6. Set `Aggregation Interval` to `5 SEC`.\n\n7. 
Check the box beside `Include metadata`.\n\n8. Set `Sample rate` to `100`.\n\n9. Click `Save`.\n\n**Note**: It is not possible to configure a Log filter from the console.\n\n**From Google Cloud CLI**\n\nTo enable VPC Flow Logs for a network subnet, run the following command:\n```\ngcloud compute networks subnets update SUBNET_NAME --region REGION --enable-flow-logs --logging-aggregation-interval=interval-5-sec --logging-flow-sampling=1 --logging-metadata=include-all\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the VPC network page in the GCP Console by visiting `https://console.cloud.google.com/networking/networks/list` \n\n2. From the list of network subnets, make sure for each subnet:\n- `Flow Logs` is set to `On`\n- `Aggregation Interval` is set to `5 sec`\n- `Include metadata` checkbox is checked\n- `Sample rate` is set to `100%`\n\n**Note**: It is not possible to determine if a Log filter has been defined from the console.\n\n**From Google Cloud CLI**\n\n```\ngcloud compute networks subnets list --format json | \\\n jq -r '([\"Subnet\",\"Purpose\",\"Flow_Logs\",\"Aggregation_Interval\",\"Flow_Sampling\",\"Metadata\",\"Logs_Filtered\"] | (., map(length*\"-\"))),\n (.[] | [\n .name,\n .purpose,\n (if has(\"enableFlowLogs\") and .enableFlowLogs == true then \"Enabled\" else \"Disabled\" end),\n (if has(\"logConfig\") then .logConfig.aggregationInterval else \"N/A\" end),\n (if has(\"logConfig\") then .logConfig.flowSampling else \"N/A\" end),\n (if has(\"logConfig\") then .logConfig.metadata else \"N/A\" end),\n (if has(\"logConfig\") then (.logConfig | has(\"filterExpr\")) else \"N/A\" end)\n ]) |\n @tsv' | \\\n column -t\n\n```\n\nThe output of the above command will list:\n- each subnet\n- the subnet's purpose\n- an `Enabled` or `Disabled` value if `Flow Logs` are enabled\n- the value for `Aggregation Interval` or `N/A` if disabled\n- the value for `Flow Sampling` or `N/A` if disabled\n- the value for `Metadata` or `N/A` if disabled\n- 'true' or 'false' if a Logging Filter is configured or 'N/A' if disabled.\n\nIf the subnet's purpose is `PRIVATE` then `Flow Logs` should be `Enabled`.\n\nIf `Flow Logs` is enabled then:\n- `Aggregation_Interval` should be `INTERVAL_5_SEC`\n- `Flow_Sampling` should be 1\n- `Metadata` should be `INCLUDE_ALL_METADATA`\n- `Logs_Filtered` should be `false`.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/vpc/docs/using-flow-logs#enabling_vpc_flow_logging:https://cloud.google.com/vpc/" + } + ] + }, + { + "Id": "3.9", + "Description": "Secure Sockets Layer (SSL) policies determine what Transport Layer Security (TLS) features clients are permitted to use when connecting to load balancers. To prevent usage of insecure features, SSL policies should use (a) at least TLS 1.2 with the MODERN profile; or (b) the RESTRICTED profile, because it effectively requires clients to use TLS 1.2 regardless of the chosen minimum TLS version; or (c) a CUSTOM profile that does not support any of the following features: \n```\nTLS_RSA_WITH_AES_128_GCM_SHA256\nTLS_RSA_WITH_AES_256_GCM_SHA384\nTLS_RSA_WITH_AES_128_CBC_SHA\nTLS_RSA_WITH_AES_256_CBC_SHA\nTLS_RSA_WITH_3DES_EDE_CBC_SHA\n```", + "Checks": [], + "Attributes": [ + { + "Section": "3. Networking", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "Secure Sockets Layer (SSL) policies determine what Transport Layer Security (TLS) features clients are permitted to use when connecting to load balancers. 
To prevent usage of insecure features, SSL policies should use (a) at least TLS 1.2 with the MODERN profile; or (b) the RESTRICTED profile, because it effectively requires clients to use TLS 1.2 regardless of the chosen minimum TLS version; or (c) a CUSTOM profile that does not support any of the following features: \n```\nTLS_RSA_WITH_AES_128_GCM_SHA256\nTLS_RSA_WITH_AES_256_GCM_SHA384\nTLS_RSA_WITH_AES_128_CBC_SHA\nTLS_RSA_WITH_AES_256_CBC_SHA\nTLS_RSA_WITH_3DES_EDE_CBC_SHA\n```", + "RationaleStatement": "Load balancers are used to efficiently distribute traffic across multiple servers. Both SSL proxy and HTTPS load balancers are external load balancers, meaning they distribute traffic from the Internet to a GCP network. GCP customers can configure load balancer SSL policies with a minimum TLS version (1.0, 1.1, or 1.2) that clients can use to establish a connection, along with a profile (Compatible, Modern, Restricted, or Custom) that specifies permissible cipher suites. To comply with users using outdated protocols, GCP load balancers can be configured to permit insecure cipher suites. In fact, the GCP default SSL policy uses a minimum TLS version of 1.0 and a Compatible profile, which allows the widest range of insecure cipher suites. As a result, it is easy for customers to configure a load balancer without even knowing that they are permitting outdated cipher suites.", + "ImpactStatement": "Creating more secure SSL policies can prevent clients using older TLS versions from establishing a connection.", + "RemediationProcedure": "**From Google Cloud Console**\n\nIf the TargetSSLProxy or TargetHttpsProxy does not have an SSL policy configured, create a new SSL policy. Otherwise, modify the existing insecure policy. \n\n1. Navigate to the `SSL Policies` page by visiting: https://console.cloud.google.com/net-security/sslpolicies(https://console.cloud.google.com/net-security/sslpolicies)\n2. Click on the name of the insecure policy to go to its `SSL policy details` page.\n3. Click `EDIT`.\n4. Set `Minimum TLS version` to `TLS 1.2`.\n5. Set `Profile` to `Modern` or `Restricted`. \n6. Alternatively, if the user selects the profile `Custom`, make sure that the following features are disabled: \n```\nTLS_RSA_WITH_AES_128_GCM_SHA256\nTLS_RSA_WITH_AES_256_GCM_SHA384\nTLS_RSA_WITH_AES_128_CBC_SHA\nTLS_RSA_WITH_AES_256_CBC_SHA\nTLS_RSA_WITH_3DES_EDE_CBC_SHA\n```\n\n**From Google Cloud CLI**\n\n1. For each insecure SSL policy, update it to use secure ciphers:\n```\ngcloud compute ssl-policies update NAME --profile COMPATIBLE|MODERN|RESTRICTED|CUSTOM --min-tls-version 1.2 --custom-features FEATURES\n```\n\n2. If the target proxy has a GCP default SSL policy, use the following command corresponding to the proxy type to update it.\n\n```\ngcloud compute target-ssl-proxies update TARGET_SSL_PROXY_NAME --ssl-policy SSL_POLICY_NAME\ngcloud compute target-https-proxies update TARGET_HTTPS_POLICY_NAME --ssl-policy SSL_POLICY_NAME\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. See all load balancers by visiting https://console.cloud.google.com/net-services/loadbalancing/loadBalancers/list(https://console.cloud.google.com/net-services/loadbalancing/loadBalancers/list).\n2. For each load balancer for `SSL (Proxy)` or `HTTPS`, click on its name to go to the `Load balancer details` page.\n3. Ensure that each target proxy entry in the `Frontend` table has an `SSL Policy` configured. \n4. Click on each SSL policy to go to its `SSL policy details` page.\n5. 
Ensure that the SSL policy satisfies one of the following conditions: \n- has a `Min TLS` set to `TLS 1.2` and `Profile` set to `Modern` profile, or\n- has `Profile` set to `Restricted`. Note that a Restricted profile effectively requires clients to use TLS 1.2 regardless of the chosen minimum TLS version, or\n- has `Profile` set to `Custom` and the following features are all disabled:\n```\nTLS_RSA_WITH_AES_128_GCM_SHA256\nTLS_RSA_WITH_AES_256_GCM_SHA384\nTLS_RSA_WITH_AES_128_CBC_SHA\nTLS_RSA_WITH_AES_256_CBC_SHA\nTLS_RSA_WITH_3DES_EDE_CBC_SHA\n```\n\n**From Google Cloud CLI**\n\n1. List all TargetHttpsProxies and TargetSslProxies.\n```\ngcloud compute target-https-proxies list\ngcloud compute target-ssl-proxies list\n```\n\n2. For each target proxy, list its properties:\n```\ngcloud compute target-https-proxies describe TARGET_HTTPS_PROXY_NAME\ngcloud compute target-ssl-proxies describe TARGET_SSL_PROXY_NAME\n```\n\n3. Ensure that the `sslPolicy` field is present and identifies the name of the SSL policy: \n```\nsslPolicy: https://www.googleapis.com/compute/v1/projects/PROJECT_ID/global/sslPolicies/SSL_POLICY_NAME\n```\nIf the `sslPolicy` field is missing from the configuration, it means that the GCP default policy is used, which is insecure.\n\n4. Describe the SSL policy:\n```\ngcloud compute ssl-policies describe SSL_POLICY_NAME\n```\n5. Ensure that the policy satisfies one of the following conditions:\n- has `Profile` set to `Modern` and `minTlsVersion` set to `TLS_1_2`, or\n- has `Profile` set to `Restricted`, or\n- has `Profile` set to `Custom` and  `enabledFeatures` does not contain any of the following values:\n```\nTLS_RSA_WITH_AES_128_GCM_SHA256\nTLS_RSA_WITH_AES_256_GCM_SHA384\nTLS_RSA_WITH_AES_128_CBC_SHA\nTLS_RSA_WITH_AES_256_CBC_SHA\nTLS_RSA_WITH_3DES_EDE_CBC_SHA\n```", + "AdditionalInformation": "", + "References": "https://cloud.google.com/load-balancing/docs/use-ssl-policies:https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-52r2.pdf" + } + ] + }, + { + "Id": "3.10", + "Description": "IAP authenticates the user requests to your apps via a Google single sign in. You can then manage these users with permissions to control access. It is recommended to use both IAP permissions and firewalls to restrict this access to your apps with sensitive information.", + "Checks": [], + "Attributes": [ + { + "Section": "3. Networking", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "IAP authenticates the user requests to your apps via a Google single sign in. You can then manage these users with permissions to control access. It is recommended to use both IAP permissions and firewalls to restrict this access to your apps with sensitive information.", + "RationaleStatement": "IAP ensure that access to VMs is controlled by authenticating incoming requests. Access to your apps and the VMs should be restricted by firewall rules that allow only the proxy IAP IP addresses contained in the 35.235.240.0/20 subnet. Otherwise, unauthenticated requests can be made to your apps. To ensure that load balancing works correctly health checks should also be allowed.", + "ImpactStatement": "If firewall rules are not configured correctly, legitimate business services could be negatively impacted. It is recommended to make these changes during a time of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n1. 
Go to the Cloud Console VPC network > Firewall rules(https://console.cloud.google.com/networking/firewalls/list?_ga=2.72166934.480049361.1580860862-1336643914.1580248695).\n2. Select the checkbox next to the following rules:\n - default-allow-http\n - default-allow-https\n - default-allow-internal\n3. Click `Delete`.\n4. Click `Create firewall rule` and set the following values:\n - Name: allow-iap-traffic\n - Targets: All instances in the network\n - Source IP ranges (press Enter after you paste each value in the box, copy each full CIDR IP address):\n - IAP Proxy Addresses `35.235.240.0/20`\n - Google Health Check `130.211.0.0/22`\n - Google Health Check `35.191.0.0/16`\n - Protocols and ports:\n - Specified protocols and ports required for access and management of your app. For example most health check connection protocols would be covered by;\n - tcp:80 (Default HTTP Health Check port)\n - tcp:443 (Default HTTPS Health Check port)\n**Note: if you have custom ports used by your load balancers, you will need to list them here**\n5. When you're finished updating values, click `Create`.", + "AuditProcedure": "**From Google Cloud Console**\n\n1. For each of your apps that have IAP enabled go to the Cloud Console VPC network > Firewall rules.\n2. Verify that the only rules correspond to the following values:\n - Targets: All instances in the network\n - Source IP ranges:\n - IAP Proxy Addresses `35.235.240.0/20`\n - Google Health Check `130.211.0.0/22`\n - Google Health Check `35.191.0.0/16`\n - Protocols and ports:\n - Specified protocols and ports required for access and management of your app. For example most health check connection protocols would be covered by;\n - tcp:80 (Default HTTP Health Check port)\n - tcp:443 (Default HTTPS Health Check port)\n\n**Note: if you have custom ports used by your load balancers, you will need to list them here**", + "AdditionalInformation": "", + "References": "https://cloud.google.com/iap/docs/concepts-overview:https://cloud.google.com/iap/docs/load-balancer-howto:https://cloud.google.com/load-balancing/docs/health-checks:https://cloud.google.com/blog/products/identity-security/cloud-iap-enables-context-aware-access-to-vms-via-ssh-and-rdp-without-bastion-hosts" + } + ] + }, + { + "Id": "4.5", + "Description": "Interacting with a serial port is often referred to as the serial console, which is similar to using a terminal window, in that input and output is entirely in text mode and there is no graphical interface or mouse support.\n\nIf you enable the interactive serial console on an instance, clients can attempt to connect to that instance from any IP address. Therefore interactive serial console support should be disabled.", + "Checks": [ + "compute_instance_serial_ports_in_use" + ], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Interacting with a serial port is often referred to as the serial console, which is similar to using a terminal window, in that input and output is entirely in text mode and there is no graphical interface or mouse support.\n\nIf you enable the interactive serial console on an instance, clients can attempt to connect to that instance from any IP address. Therefore interactive serial console support should be disabled.", + "RationaleStatement": "A virtual machine instance has four virtual serial ports. 
Interacting with a serial port is similar to using a terminal window, in that input and output is entirely in text mode and there is no graphical interface or mouse support. The instance's operating system, BIOS, and other system-level entities often write output to the serial ports, and can accept input such as commands or answers to prompts. Typically, these system-level entities use the first serial port (port 1) and serial port 1 is often referred to as the serial console.\n\nThe interactive serial console does not support IP-based access restrictions such as IP whitelists. If you enable the interactive serial console on an instance, clients can attempt to connect to that instance from any IP address. This allows anybody to connect to that instance if they know the correct SSH key, username, project ID, zone, and instance name.\n\nTherefore interactive serial console support should be disabled.", + "ImpactStatement": "", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Log in to the Google Cloud console\n2. Go to Compute Engine\n3. Go to VM instances\n4. Click on the Specific VM\n5. Click `EDIT`\n6. Unselect `Enable connecting to serial ports` below `Remote access` block.\n7. Click `Save`\n\n**From Google Cloud CLI**\n\nUse the below command to disable serial port access:\n```\ngcloud compute instances add-metadata INSTANCE_NAME --zone=ZONE --metadata=serial-port-enable=false\n```\n\nor\n\n```\ngcloud compute instances add-metadata INSTANCE_NAME --zone=ZONE --metadata=serial-port-enable=0\n```\n\n**Prevention:**\n\nYou can prevent VMs from having serial port access enabled by the `Disable VM serial port access` organization policy: \nhttps://console.cloud.google.com/iam-admin/orgpolicies/compute-disableSerialPortAccess(https://console.cloud.google.com/iam-admin/orgpolicies/compute-disableSerialPortAccess).", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Log in to the Google Cloud console\n2. Go to Compute Engine\n3. Go to VM instances\n4. Click on the Specific VM\n5. Ensure `Enable connecting to serial ports` below `Remote access` block is unselected.\n\n**From Google Cloud CLI**\n\nEnsure the below command's output shows `null`:\n\n```\ngcloud compute instances describe INSTANCE_NAME --zone=ZONE --format=\"json(metadata.items.key,metadata.items.value)\"\n``` \n\nor the `key` and `value` properties from the below command's JSON response are equal to `serial-port-enable` and `0` or `false` respectively.\n\n```\n {\n \"metadata\": {\n \"items\": [\n {\n \"key\": \"serial-port-enable\",\n \"value\": \"0\"\n }\n ]\n }\n }\n```", + "AdditionalInformation": "", + "References": "https://cloud.google.com/compute/docs/instances/interacting-with-serial-console" + } + ] + }, + { + "Id": "4.3", + "Description": "It is recommended to use Instance specific SSH key(s) instead of using common/shared project-wide SSH key(s) to access Instances.", + "Checks": [ + "compute_instance_block_project_wide_ssh_keys_disabled" + ], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to use Instance specific SSH key(s) instead of using common/shared project-wide SSH key(s) to access Instances.", + "RationaleStatement": "Project-wide SSH keys are stored in Compute/Project-meta-data. Project-wide SSH keys can be used to log in to all the instances within the project. 
Using project-wide SSH keys eases the SSH key management but if compromised, poses the security risk which can impact all the instances within project.\nIt is recommended to use Instance specific SSH keys which can limit the attack surface if the SSH keys are compromised.", + "ImpactStatement": "Users already having Project-wide ssh key pairs and using third party SSH clients will lose access to the impacted Instances. For Project users using gcloud or GCP Console based SSH option, no manual key creation and distribution is required and will be handled by GCE (Google Compute Engine) itself. To access Instance using third party SSH clients Instance specific SSH key pairs need to be created and distributed to the required users.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances). It will list all the instances in your project.\n\n2. Click on the name of the Impacted instance\n\n3. Click `Edit` in the toolbar\n\n4. Under SSH Keys, go to the `Block project-wide SSH keys` checkbox\n\n5. To block users with project-wide SSH keys from connecting to this instance, select `Block project-wide SSH keys`\n\n6. Click `Save` at the bottom of the page\n\n7. Repeat steps for every impacted Instance\n\n**From Google Cloud CLI**\n\nTo block project-wide public SSH keys, set the metadata value to `TRUE`:\n\n```\ngcloud compute instances add-metadata --metadata block-project-ssh-keys=TRUE\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the `VM instances` page by visiting https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances). It will list all the instances in your project.\n\n2. For every instance, click on the name of the instance.\n\n3. Under `SSH Keys`, ensure `Block project-wide SSH keys` is selected.\n\n**From Google Cloud CLI**\n\n1. List the instances in your project and get details on each instance:\n```\ngcloud compute instances list --format=json\n```\n2. Ensure `key: block-project-ssh-keys` is set to `value: 'true'`.", + "AdditionalInformation": "If OS Login is enabled, SSH keys in instance metadata are ignored, and therefore blocking project-wide SSH keys is not necessary.", + "References": "https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys:https://cloud.google.com/sdk/gcloud/reference/topic/formats" + } + ] + }, + { + "Id": "4.8", + "Description": "To defend against advanced threats and ensure that the boot loader and firmware on your VMs are signed and untampered, it is recommended that Compute instances are launched with Shielded VM enabled.", + "Checks": [ + "compute_instance_shielded_vm_enabled" + ], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "To defend against advanced threats and ensure that the boot loader and firmware on your VMs are signed and untampered, it is recommended that Compute instances are launched with Shielded VM enabled.", + "RationaleStatement": "Shielded VMs are virtual machines (VMs) on Google Cloud Platform hardened by a set of security controls that help defend against rootkits and bootkits. \n\nShielded VM offers verifiable integrity of your Compute Engine VM instances, so you can be confident your instances haven't been compromised by boot- or kernel-level malware or rootkits. 
Shielded VM's verifiable integrity is achieved through the use of Secure Boot, virtual trusted platform module (vTPM)-enabled Measured Boot, and integrity monitoring.\n\nShielded VM instances run firmware which is signed and verified using Google's Certificate Authority, ensuring that the instance's firmware is unmodified and establishing the root of trust for Secure Boot.\n\nIntegrity monitoring helps you understand and make decisions about the state of your VM instances and the Shielded VM vTPM enables Measured Boot by performing the measurements needed to create a known good boot baseline, called the integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed.\n\nSecure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails.", + "ImpactStatement": "", + "RemediationProcedure": "To be able to turn on `Shielded VM` on an instance, your instance must use an image with Shielded VM support. \n\n**From Google Cloud Console**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n\n2. Click on the instance name to see its `VM instance details` page.\n\n3. Click `STOP` to stop the instance.\n\n4. When the instance has stopped, click `EDIT`.\n\n5. In the Shielded VM section, select `Turn on vTPM` and `Turn on Integrity Monitoring`.\n\n6. Optionally, if you do not use any custom or unsigned drivers on the instance, also select `Turn on Secure Boot`.\n\n7. Click the `Save` button to modify the instance and then click `START` to restart it.\n\n**From Google Cloud CLI**\n\nYou can only enable Shielded VM options on instances that have Shielded VM support. For a list of Shielded VM public images, run the gcloud compute images list command with the following flags:\n\n```\ngcloud compute images list --project gce-uefi-images --no-standard-images\n```\n\n1. Stop the instance:\n```\ngcloud compute instances stop INSTANCE_NAME\n```\n2. Update the instance:\n\n```\ngcloud compute instances update INSTANCE_NAME --shielded-vtpm --shielded-vm-integrity-monitoring\n```\n3. Optionally, if you do not use any custom or unsigned drivers on the instance, also turn on secure boot.\n\n```\ngcloud compute instances update INSTANCE_NAME --shielded-vm-secure-boot\n```\n\n4. Restart the instance:\n\n```\ngcloud compute instances start INSTANCE_NAME\n```\n\n**Prevention:**\n\nYou can ensure that all new VMs will be created with Shielded VM enabled by setting up an Organization Policy for `Shielded VM` at https://console.cloud.google.com/iam-admin/orgpolicies/compute-requireShieldedVm(https://console.cloud.google.com/iam-admin/orgpolicies/compute-requireShieldedVm). Learn more at: \nhttps://cloud.google.com/security/shielded-cloud/shielded-vm#organization-policy-constraint(https://cloud.google.com/security/shielded-cloud/shielded-vm#organization-policy-constraint).", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n\n2. Click on the instance name to see its `VM instance details` page.\n\n3. Under the section `Shielded VM`, ensure that `vTPM` and `Integrity Monitoring` are `on`.\n\n**From Google Cloud CLI**\n\n1. 
For each instance in your project, get its metadata:\n```\ngcloud compute instances list --format=json | jq -r '.[] | \"vTPM: \\(.shieldedInstanceConfig.enableVtpm) IntegrityMonitoring: \\(.shieldedInstanceConfig.enableIntegrityMonitoring) Name: \\(.name)\"'\n```\n\n2. Ensure that there is a `shieldedInstanceConfig` configuration and that configuration has the `enableIntegrityMonitoring` and `enableVtpm` set to `true`. If the VM was not created from a Shielded VM image, you will not see a `shieldedInstanceConfig` in the output.", + "AdditionalInformation": "If you do use custom or unsigned drivers on the instance, enabling Secure Boot will cause the machine to no longer boot. Turn on Secure Boot only on instances that have been verified to not have any custom drivers installed.", + "References": "https://cloud.google.com/compute/docs/instances/modifying-shielded-vm:https://cloud.google.com/shielded-vm:https://cloud.google.com/security/shielded-cloud/shielded-vm#organization-policy-constraint" + } + ] + }, + { + "Id": "4.4", + "Description": "Enabling OS login binds SSH certificates to IAM users and facilitates effective SSH certificate management.", + "Checks": [ + "compute_project_os_login_enabled" + ], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Enabling OS login binds SSH certificates to IAM users and facilitates effective SSH certificate management.", + "RationaleStatement": "Enabling osLogin ensures that SSH keys used to connect to instances are mapped with IAM users. Revoking access to an IAM user will revoke all the SSH keys associated with that particular user. It facilitates centralized and automated SSH key pair management which is useful in handling cases like response to compromised SSH key pairs and/or revocation of external/third-party/Vendor users.", + "ImpactStatement": "Enabling OS Login on a project disables metadata-based SSH key configurations on all instances from a project. Disabling OS Login restores SSH keys that you have configured in project or instance meta-data.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the VM compute metadata page by visiting: https://console.cloud.google.com/compute/metadata(https://console.cloud.google.com/compute/metadata).\n\n2. Click `Edit`.\n\n3. Add a metadata entry where the key is `enable-oslogin` and the value is `TRUE`.\n\n4. Click `Save` to apply the changes.\n\n5. For every instance that overrides the project setting, go to the `VM Instances` page at https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n\n6. Click the name of the instance on which you want to remove the metadata value.\n7. At the top of the instance details page, click `Edit` to edit the instance settings.\n8. Under `Custom metadata`, remove any entry with key `enable-oslogin` and value `FALSE`\n9. At the bottom of the instance details page, click `Save` to apply your changes to the instance.\n\n**From Google Cloud CLI**\n\n1. Configure oslogin on the project:\n```\ngcloud compute project-info add-metadata --metadata enable-oslogin=TRUE\n```\n2. Remove instance metadata that overrides the project setting.\n```\ngcloud compute instances remove-metadata INSTANCE_NAME --keys=enable-oslogin\n```\n\nOptionally, you can enable two-factor authentication for OS login. 
For more information, see: https://cloud.google.com/compute/docs/oslogin/setup-two-factor-authentication(https://cloud.google.com/compute/docs/oslogin/setup-two-factor-authentication).", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the VM compute metadata page by visiting https://console.cloud.google.com/compute/metadata(https://console.cloud.google.com/compute/metadata).\n\n2. Ensure that key `enable-oslogin` is present with value set to `TRUE`. \n\n3. Because instances can override project settings, ensure that no instance has custom metadata with key `enable-oslogin` and value `FALSE`.\n\n**From Google Cloud CLI**\n\n1. List the instances in your project and get details on each instance:\n```\ngcloud compute instances list --format=json\n```\n2. Verify that the section `commonInstanceMetadata` has a key `enable-oslogin` set to value `TRUE`.\n**Exception:**\nVMs created by GKE should be excluded. These VMs have names that start with `gke-` and are labeled `goog-gke-node`.", + "AdditionalInformation": "1. In order to use osLogin, instances using Custom Images must have the latest version of the Linux Guest Environment installed. The following image families do not yet support OS Login:\n\n```\nProject cos-cloud (Container-Optimized OS) image family cos-stable.\n\nAll project coreos-cloud (CoreOS) image families\n\nProject suse-cloud (SLES) image family sles-11\n\nAll Windows Server and SQL Server image families\n```\n\n2. The project-wide enable-oslogin setting can be overridden by setting the enable-oslogin parameter in an individual instance's metadata.", + "References": "https://cloud.google.com/compute/docs/instances/managing-instance-access:https://cloud.google.com/compute/docs/instances/managing-instance-access#enable_oslogin:https://cloud.google.com/sdk/gcloud/reference/compute/instances/remove-metadata:https://cloud.google.com/compute/docs/oslogin/setup-two-factor-authentication" + } + ] + }, + { + "Id": "4.9", + "Description": "Compute instances should not be configured to have external IP addresses.", + "Checks": [ + "compute_instance_public_ip" + ], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "Compute instances should not be configured to have external IP addresses.", + "RationaleStatement": "To reduce your attack surface, Compute instances should not have public IP addresses. Instead, instances should be configured behind load balancers, to minimize the instance's exposure to the internet.", + "ImpactStatement": "Removing the external IP address from your Compute instance may cause some applications to stop working.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n\n2. Click on the instance name to go to the `Instance details` page.\n\n3. Click `Edit`.\n\n4. For each Network interface, ensure that `External IP` is set to `None`.\n\n5. Click `Done` and then click `Save`.\n\n**From Google Cloud CLI**\n\n1. Describe the instance properties:\n```\ngcloud compute instances describe INSTANCE_NAME --zone=ZONE\n```\n\n2. Identify the access config name that contains the external IP address. This access config appears in the following format:\n\n```\nnetworkInterfaces:\n- accessConfigs:\n - kind: compute#accessConfig\n name: External NAT\n natIP: 130.211.181.55\n type: ONE_TO_ONE_NAT\n```\n\n3. Delete the access config. 
\n```\ngcloud compute instances delete-access-config INSTANCE_NAME --zone=ZONE --access-config-name ACCESS_CONFIG_NAME\n```\n\nIn the above example, the `ACCESS_CONFIG_NAME` is `External NAT`. The name of your access config might be different.\n\n**Prevention:**\nYou can configure the `Define allowed external IPs for VM instances` Organization Policy to prevent VMs from being configured with public IP addresses. Learn more at: https://console.cloud.google.com/orgpolicies/compute-vmExternalIpAccess(https://console.cloud.google.com/orgpolicies/compute-vmExternalIpAccess)", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n\n2. For every VM, ensure that there is no `External IP` configured.\n\n**From Google Cloud CLI**\n\n```\ngcloud compute instances list --format=json\n```\n\n1. The output should not contain an `accessConfigs` section under `networkInterfaces`. Note that the `natIP` value is present only for instances that are running or for instances that are stopped but have a static IP address. For instances that are stopped and are configured to have an ephemeral public IP address, the `natIP` field will not be present. Example output:\n\n```\nnetworkInterfaces:\n- accessConfigs:\n - kind: compute#accessConfig\n name: External NAT\n networkTier: STANDARD\n type: ONE_TO_ONE_NAT\n```\n\n**Exception:**\nInstances created by GKE should be excluded because some of them have external IP addresses and cannot be changed by editing the instance settings. These instances have names that start with \"gke-\" and are labeled \"goog-gke-node\".", + "AdditionalInformation": "You can connect to Linux VMs that do not have public IP addresses by using Identity-Aware Proxy for TCP forwarding. Learn more at https://cloud.google.com/compute/docs/instances/connecting-advanced#sshbetweeninstances(https://cloud.google.com/compute/docs/instances/connecting-advanced#sshbetweeninstances)\n\nFor Windows VMs, see https://cloud.google.com/compute/docs/instances/connecting-to-instance(https://cloud.google.com/compute/docs/instances/connecting-to-instance).", + "References": "https://cloud.google.com/load-balancing/docs/backend-service#backends_and_external_ip_addresses:https://cloud.google.com/compute/docs/instances/connecting-advanced#sshbetweeninstances:https://cloud.google.com/compute/docs/instances/connecting-to-instance:https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address#unassign_ip:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints" + } + ] + }, + { + "Id": "4.11", + "Description": "Google Cloud encrypts data at-rest and in-transit, but customer data must be decrypted for processing. Confidential Computing is a breakthrough technology which encrypts data in-use—while it is being processed. Confidential Computing environments keep data encrypted in memory and elsewhere outside the central processing unit (CPU). \n\nConfidential VMs leverage the Secure Encrypted Virtualization (SEV) feature of AMD EPYC™ CPUs. Customer data will stay encrypted while it is used, indexed, queried, or trained on. Encryption keys are generated in hardware, per VM, and not exportable. 
Thanks to built-in hardware optimizations of both performance and security, there is no significant performance penalty to Confidential Computing workloads.", + "Checks": [ + "compute_instance_confidential_computing_enabled" + ], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "Google Cloud encrypts data at-rest and in-transit, but customer data must be decrypted for processing. Confidential Computing is a breakthrough technology which encrypts data in-use—while it is being processed. Confidential Computing environments keep data encrypted in memory and elsewhere outside the central processing unit (CPU). \n\nConfidential VMs leverage the Secure Encrypted Virtualization (SEV) feature of AMD EPYC™ CPUs. Customer data will stay encrypted while it is used, indexed, queried, or trained on. Encryption keys are generated in hardware, per VM, and not exportable. Thanks to built-in hardware optimizations of both performance and security, there is no significant performance penalty to Confidential Computing workloads.", + "RationaleStatement": "Confidential Computing keeps customers' sensitive code and other data encrypted in memory during processing. Google does not have access to the encryption keys. Confidential VM can help alleviate concerns about risk related to either dependency on Google infrastructure or Google insiders' access to customer data in the clear.", + "ImpactStatement": "- Confidential Computing for Compute instances does not support live migration. Unlike regular Compute instances, Confidential VMs experience disruptions during maintenance events like a software or hardware update.\n- Additional charges may be incurred when enabling this security feature. See https://cloud.google.com/compute/confidential-vm/pricing(https://cloud.google.com/compute/confidential-vm/pricing) for more info.", + "RemediationProcedure": "Confidential Computing can only be enabled when an instance is created. You must delete the current instance and create a new one.\n\n**From Google Cloud Console**\n\n1. Go to the VM instances page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n\n2. Click `CREATE INSTANCE`.\n\n3. Fill out the desired configuration for your instance.\n\n4. Under the `Confidential VM service` section, check the option `Enable the Confidential Computing service on this VM instance`.\n\n5. Click `Create`.\n\n**From Google Cloud CLI**\n\nCreate a new instance with Confidential Compute enabled. \n\n```\ngcloud compute instances create INSTANCE_NAME --zone=ZONE --confidential-compute --maintenance-policy=TERMINATE\n```", + "AuditProcedure": "Note: Confidential Computing is currently only supported on N2D machines. To learn more about types of N2D machines, visit https://cloud.google.com/compute/docs/machine-types#n2d_machine_types(https://cloud.google.com/compute/docs/machine-types#n2d_machine_types)\n\n**From Google Cloud Console**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n\n2. Click on the instance name to see its VM instance details page.\n\n3. Ensure that `Confidential VM service` is `Enabled`.\n\n**From Google Cloud CLI**\n\n1. List the instances in your project and get details on each instance:\n\n```\ngcloud compute instances list --format=json\n```\n2. 
Ensure that `enableConfidentialCompute` is set to `true` for all instances with machine type starting with \"n2d-\".\n\n```\nconfidentialInstanceConfig:\n enableConfidentialCompute: true\n```", + "AdditionalInformation": "", + "References": "https://cloud.google.com/compute/confidential-vm/docs/creating-cvm-instance:https://cloud.google.com/compute/confidential-vm/docs/about-cvm:https://cloud.google.com/confidential-computing:https://cloud.google.com/blog/products/identity-security/introducing-google-cloud-confidential-computing-with-confidential-vms" + } + ] + }, + { + "Id": "4.1", + "Description": "It is recommended to configure your instance to not use the default Compute Engine service account because it has the Editor role on the project.", + "Checks": [ + "compute_instance_default_service_account_in_use" + ], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to configure your instance to not use the default Compute Engine service account because it has the Editor role on the project.", + "RationaleStatement": "The default Compute Engine service account has the Editor role on the project, which allows read and write access to most Google Cloud Services. To defend against privilege escalations if your VM is compromised and prevent an attacker from gaining access to all of your project, it is recommended to not use the default Compute Engine service account. Instead, you should create a new service account and assign only the permissions needed by your instance.\n\nThe default Compute Engine service account is named `PROJECT_NUMBER-compute@developer.gserviceaccount.com`.", + "ImpactStatement": "", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n2. Click on the instance name to go to its `VM instance details` page.\n3. Click `STOP` and then click `EDIT`.\n4. Under the section `API and identity management`, select a service account other than the default Compute Engine service account. You may first need to create a new service account.\n5. Click `Save` and then click `START`.\n\n**From Google Cloud CLI**\n\n1. Stop the instance:\n```\ngcloud compute instances stop INSTANCE_NAME\n```\n2. Update the instance:\n```\ngcloud compute instances set-service-account INSTANCE_NAME --service-account=SERVICE_ACCOUNT_EMAIL\n```\n3. Restart the instance:\n```\ngcloud compute instances start INSTANCE_NAME\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n2. Click on each instance name to go to its `VM instance details` page.\n3. Under the section `API and identity management`, ensure that the default Compute Engine service account is not used. This account is named `PROJECT_NUMBER-compute@developer.gserviceaccount.com`.\n\n**From Google Cloud CLI**\n\n1. List the instances in your project and get details on each instance:\n```\ngcloud compute instances list --format=json | jq -r '.[] | \"SA: \\(.serviceAccounts[].email) Name: \\(.name)\"'\n```\n2. Ensure that the service account section has an email that does not match the pattern `PROJECT_NUMBER-compute@developer.gserviceaccount.com`.\n\n**Exception:**\nVMs created by GKE should be excluded. 
These VMs have names that start with `gke-` and are labeled `goog-gke-node`.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/compute/docs/access/service-accounts:https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances:https://cloud.google.com/sdk/gcloud/reference/compute/instances/set-service-account" + } + ] + }, + { + "Id": "4.2", + "Description": "To support the principle of least privilege and prevent potential privilege escalation, it is recommended that instances are not assigned to default service account `Compute Engine default service account` with Scope `Allow full access to all Cloud APIs`.", + "Checks": [ + "compute_instance_default_service_account_in_use_with_full_api_access" + ], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "To support the principle of least privilege and prevent potential privilege escalation, it is recommended that instances are not assigned to default service account `Compute Engine default service account` with Scope `Allow full access to all Cloud APIs`.", + "RationaleStatement": "Along with the ability to optionally create, manage and use user managed custom service accounts, Google Compute Engine provides the default service account `Compute Engine default service account` for instances to access necessary cloud services.\nThe `Project Editor` role is assigned to `Compute Engine default service account`; hence, this service account has almost all capabilities over all cloud services except billing.\nHowever, when `Compute Engine default service account` is assigned to an instance, it can operate in 3 scopes.\n\n```\n1. Allow default access: Allows only minimum access required to run an Instance (Least Privileges)\n\n2. Allow full access to all Cloud APIs: Allow full access to all the cloud APIs/Services (Too much access)\n\n3. Set access for each API: Allows Instance administrator to choose only those APIs that are needed to perform specific business functionality expected by instance\n```\n\nWhen an instance is configured with `Compute Engine default service account` with Scope `Allow full access to all Cloud APIs`, based on the IAM roles assigned to the user(s) accessing the instance, it may allow a user to perform cloud operations/API calls that the user is not supposed to perform, leading to successful privilege escalation.", + "ImpactStatement": "In order to change service account or scope for an instance, it needs to be stopped.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n\n2. Click on the impacted VM instance.\n\n3. If the instance is not stopped, click the `Stop` button. Wait for the instance to be stopped.\n\n4. Next, click the `Edit` button.\n\n5. Scroll down to the `Service Account` section.\n\n6. Select a different service account or ensure that `Allow full access to all Cloud APIs` is not selected.\n\n7. Click the `Save` button to save your changes and then click `START`.\n\n**From Google Cloud CLI**\n\n1. Stop the instance:\n```\ngcloud compute instances stop INSTANCE_NAME\n```\n2. Update the instance:\n```\ngcloud compute instances set-service-account INSTANCE_NAME --service-account=SERVICE_ACCOUNT_EMAIL --scopes SCOPE1,SCOPE2...\n```\n3. Restart the instance:\n```\ngcloud compute instances start INSTANCE_NAME\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. 
Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n2. Click on each instance name to go to its `VM instance details` page.\n3. Under the `API and identity management`, ensure that `Cloud API access scopes` is not set to `Allow full access to all Cloud APIs`.\n\n**From Google Cloud CLI**\n\n1. List the instances in your project and get details on each instance:\n```\ngcloud compute instances list --format=json | jq -r '.[] | \"SA Scopes: \\(.serviceAccounts[].scopes) Name: \\(.name) Email: \\(.serviceAccounts[].email)\"'\n```\n2. For instances using the default service account (an email matching the pattern `PROJECT_NUMBER-compute@developer.gserviceaccount.com`), ensure that the listed scopes do not include `https://www.googleapis.com/auth/cloud-platform`, which grants full access to all Cloud APIs.\n\n**Exception:**\nVMs created by GKE should be excluded. These VMs have names that start with `gke-` and are labeled `goog-gke-node`.", + "AdditionalInformation": "- User IAM roles will override service account scopes, but configuring a minimal scope ensures defense in depth\n\n- Non-default service accounts do not offer selection of access scopes like the default service account. IAM roles with non-default service accounts should be used to control VM access.", + "References": "https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances:https://cloud.google.com/compute/docs/access/service-accounts" + } + ] + }, + { + "Id": "4.6", + "Description": "A Compute Engine instance cannot forward a packet unless the source IP address of the packet matches the IP address of the instance. Similarly, GCP won't deliver a packet whose destination IP address is different from the IP address of the instance receiving the packet. However, both capabilities are required if you want to use instances to help route packets.\n\nForwarding of data packets should be disabled to prevent data loss or information disclosure.", + "Checks": [ + "compute_instance_ip_forwarding_is_enabled" + ], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "A Compute Engine instance cannot forward a packet unless the source IP address of the packet matches the IP address of the instance. Similarly, GCP won't deliver a packet whose destination IP address is different from the IP address of the instance receiving the packet. However, both capabilities are required if you want to use instances to help route packets.\n\nForwarding of data packets should be disabled to prevent data loss or information disclosure.", + "RationaleStatement": "A Compute Engine instance cannot forward a packet unless the source IP address of the packet matches the IP address of the instance. Similarly, GCP won't deliver a packet whose destination IP address is different from the IP address of the instance receiving the packet. However, both capabilities are required if you want to use instances to help route packets.\nTo enable this source and destination IP check, disable the `canIpForward` field, which allows an instance to send and receive packets with non-matching destination or source IPs.", + "ImpactStatement": "Deleting instance(s) acting as routers/packet forwarders may break network connectivity.", + "RemediationProcedure": "You can only set the `canIpForward` field at instance creation time. Therefore, you need to delete the instance and create a new one where `canIpForward` is set to `false`.\n\n**From Google Cloud Console**\n\n1. 
Go to the `VM Instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances). \n2. Select the `VM Instance` you want to remediate.\n3. Click the `Delete` button.\n4. On the `VM Instances` page, click `CREATE INSTANCE`.\n5. Create a new instance with the desired configuration. By default, the instance is configured to not allow IP forwarding.\n\n**From Google Cloud CLI**\n\n1. Delete the instance:\n```\ngcloud compute instances delete INSTANCE_NAME\n```\n\n2. Create a new instance to replace it, with `IP forwarding` set to `Off`:\n```\ngcloud compute instances create INSTANCE_NAME\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the `VM Instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances). \n2. For every instance, click on its name to go to the `VM instance details` page.\n3. Under the `Network interfaces` section, ensure that `IP forwarding` is set to `Off` for every network interface.\n\n**From Google Cloud CLI**\n\n1. List all instances:\n```\ngcloud compute instances list --format='table(name,canIpForward)'\n```\n2. Ensure that the `CAN_IP_FORWARD` column in the output of the above command does not contain `True` for any VM instance.\n\n**Exception:**\nInstances created by GKE should be excluded because they need to have IP forwarding enabled and cannot be changed. Instances created by GKE have names that start with \"gke-\".", + "AdditionalInformation": "You can only set the `canIpForward` field at instance creation time. After an instance is created, the field becomes read-only.", + "References": "https://cloud.google.com/vpc/docs/using-routes#canipforward" + } + ] + }, + { + "Id": "4.10", + "Description": "In order to maintain the highest level of security, all connections to an application should be secure by default.", + "Checks": [], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "In order to maintain the highest level of security, all connections to an application should be secure by default.", + "RationaleStatement": "Insecure HTTP connections may be subject to eavesdropping, which can expose sensitive data.", + "ImpactStatement": "All connections to App Engine will automatically be redirected to the HTTPS endpoint, ensuring that all connections are secured by TLS.", + "RemediationProcedure": "Add a line to the app.yaml file controlling the application which enforces secure connections. For example:\n\n```\nhandlers:\n- url: /.*\n secure: always\n redirect_http_response_code: 301\n script: auto\n```\n\nhttps://cloud.google.com/appengine/docs/standard/python3/config/appref", + "AuditProcedure": "Verify that the app.yaml file controlling the application contains a line which enforces secure connections. For example:\n\n```\nhandlers:\n- url: /.*\n secure: always\n redirect_http_response_code: 301\n script: auto\n```\n\nhttps://cloud.google.com/appengine/docs/standard/python3/config/appref(https://cloud.google.com/appengine/docs/standard/python3/config/appref)", + "AdditionalInformation": "", + "References": "https://cloud.google.com/appengine/docs/standard/python3/config/appref:https://cloud.google.com/appengine/docs/flexible/nodejs/configuring-your-app-with-app-yaml" + } + ] + }, + { + "Id": "4.7", + "Description": "Customer-Supplied Encryption Keys (CSEK) are a feature in Google Cloud Storage and Google Compute Engine. 
If you supply your own encryption keys, Google uses your key to protect the Google-generated keys used to encrypt and decrypt your data. By default, Google Compute Engine encrypts all data at rest. Compute Engine handles and manages this encryption for you without any additional actions on your part. However, if you want to control and manage this encryption yourself, you can provide your own encryption keys.", + "Checks": [ + "compute_instance_encryption_with_csek_enabled" + ], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "Customer-Supplied Encryption Keys (CSEK) are a feature in Google Cloud Storage and Google Compute Engine. If you supply your own encryption keys, Google uses your key to protect the Google-generated keys used to encrypt and decrypt your data. By default, Google Compute Engine encrypts all data at rest. Compute Engine handles and manages this encryption for you without any additional actions on your part. However, if you want to control and manage this encryption yourself, you can provide your own encryption keys.", + "RationaleStatement": "By default, Google Compute Engine encrypts all data at rest. Compute Engine handles and manages this encryption for you without any additional actions on your part. However, if you want to control and manage this encryption yourself, you can provide your own encryption keys.\n\nIf you provide your own encryption keys, Compute Engine uses your key to protect the Google-generated keys used to encrypt and decrypt your data. Only users who can provide the correct key can use resources protected by a customer-supplied encryption key.\n\nGoogle does not store your keys on its servers and cannot access your protected data unless you provide the key. This also means that if you forget or lose your key, there is no way for Google to recover the key or to recover any data encrypted with the lost key.\n\nAt least business-critical VMs should have VM disks encrypted with CSEK.", + "ImpactStatement": "If you lose your encryption key, you will not be able to recover the data.", + "RemediationProcedure": "Currently there is no way to update the encryption of an existing disk. Therefore, you should create a new disk with `Encryption` set to `Customer supplied`.\n\n**From Google Cloud Console**\n\n1. Go to Compute Engine `Disks` by visiting: https://console.cloud.google.com/compute/disks(https://console.cloud.google.com/compute/disks).\n2. Click `CREATE DISK`.\n3. Set `Encryption type` to `Customer supplied`.\n4. Provide the `Key` in the box.\n5. Select `Wrapped key`.\n6. Click `Create`.\n\n**From Google Cloud CLI**\n\nIn the gcloud compute tool, encrypt a disk using the `--csek-key-file` flag during instance creation. If you are using an RSA-wrapped key, use the gcloud beta component:\n\n```\ngcloud compute instances create INSTANCE_NAME --csek-key-file KEY_FILE\n```\n\nTo encrypt a standalone persistent disk:\n```\ngcloud compute disks create DISK_NAME --csek-key-file KEY_FILE\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to Compute Engine `Disks` by visiting: https://console.cloud.google.com/compute/disks(https://console.cloud.google.com/compute/disks).\n2. Click on the disk for your critical VMs to see its configuration details.\n3. 
Ensure that `Encryption type` is set to `Customer supplied`.\n\n**From Google Cloud CLI**\n\nEnsure the `diskEncryptionKey` property in the below command's response is not null and contains the key `sha256` with a corresponding value:\n\n```\ngcloud compute disks describe DISK_NAME --zone ZONE --format=\"json(diskEncryptionKey,name)\"\n```", + "AdditionalInformation": "`Note 1:` When you delete a persistent disk, Google discards the cipher keys, rendering the data irretrievable. This process is irreversible.\n\n`Note 2:` It is up to you to generate and manage your key. You must provide a key that is a 256-bit string encoded in RFC 4648 standard base64 to Compute Engine.\n\n`Note 3:` An example key file looks like this.\n\n \n {\n \"uri\": \"https://www.googleapis.com/compute/v1/projects/myproject/zones/us-central1-a/disks/example-disk\",\n \"key\": \"acXTX3rxrKAFTF0tYVLvydU1riRZTvUNC4g5I11NY-c=\",\n \"key-type\": \"raw\"\n },\n {\n \"uri\": \"https://www.googleapis.com/compute/v1/projects/myproject/global/snapshots/my-private-snapshot\",\n \"key\": \"ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFHz0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoDD6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oeQ5lAbtt7bYAAHf5l+gJWw3sUfs0/Glw5fpdjT8Uggrr+RMZezGrltJEF293rvTIjWOEB3z5OHyHwQkvdrPDFcTqsLfh+8Hr8g+mf+7zVPEC8nEbqpdl3GPv3A7AwpFp7MA==\",\n \"key-type\": \"rsa-encrypted\"\n }\n ", + "References": "https://cloud.google.com/compute/docs/disks/customer-supplied-encryption#encrypt_a_new_persistent_disk_with_your_own_keys:https://cloud.google.com/compute/docs/reference/rest/v1/disks/get:https://cloud.google.com/compute/docs/disks/customer-supplied-encryption#key_file" + } + ] + }, + { + "Id": "4.12", + "Description": "Google Cloud Virtual Machines have the ability, via an OS Config agent API, to periodically (about every 10 minutes) report OS inventory data. A patch compliance API periodically reads this data and cross-references metadata to determine if the latest updates are installed.\n\nThis is not the only Patch Management solution available to your organization, and you should weigh your needs before committing to using this method.", + "Checks": [], + "Attributes": [ + { + "Section": "4. Virtual Machines", + "Profile": "Level 2", + "AssessmentStatus": "Manual", + "Description": "Google Cloud Virtual Machines have the ability, via an OS Config agent API, to periodically (about every 10 minutes) report OS inventory data. A patch compliance API periodically reads this data and cross-references metadata to determine if the latest updates are installed.\n\nThis is not the only Patch Management solution available to your organization, and you should weigh your needs before committing to using this method.", + "RationaleStatement": "Keeping virtual machine operating systems up to date is a security best practice. Using this service will simplify this process.", + "ImpactStatement": "Most Operating Systems require a restart or changing critical resources to apply the updates. Using the Google Cloud VM Manager for its OS patch management will incur additional costs for each VM managed by it. Please view the VM Manager pricing reference for further information.", + "RemediationProcedure": "**From Google Cloud Console**\n\n**Enabling OS Patch Management on a Project by Project Basis**\n\n**Install OS Config API for the Project**\n\n1. Navigate into a project. In the expanded portal menu located at the top left of the screen, hover over \"APIs & Services\". 
Then in the menu to the right of it, select \"API Libraries\".\n2. Search for \"VM Manager (OS Config API)\", or scroll down in the left-hand column and select the filter labeled \"Compute\", where it is the last listed. Open this API.\n3. Click the blue 'Enable' button.\n\n**Add MetaData Tags for OSConfig Parsing**\n\n1. From the main Google Cloud console, open the portal menu in the top left. Mouse over Compute Engine to expand the menu next to it.\n2. Under the \"Settings\" heading, select \"Metadata\".\n3. In this view there will be a list of the project-wide metadata tags for VMs. Click 'Edit', then 'Add item'; in the key column type 'enable-osconfig' and in the value column set it to 'true'.\n\n**From Command Line**\n\n1. For project-wide tagging, run the following command:\n\n```\ngcloud compute project-info add-metadata \\\n --project PROJECT_ID \\\n --metadata=enable-osconfig=TRUE\n```\nPlease see the reference /compute/docs/troubleshooting/vm-manager/verify-setup#metadata-enabled at the bottom for more options like instance-specific tagging.\n\nNote: Adding a new tag via the command line may overwrite existing tags. You will need to do this at a time of low usage for the least impact.\n\n**Install and Start the Local OSConfig for Data Parsing**\n\nThere is no way to centrally manage or start the Local OSConfig agent. Please view the reference of manage-os#agent-install to view specific operating system commands. \n\n**Set up a project-wide Service Account**\n\nPlease view Recommendation 4.1 to see how to set up a service account. Rerun the audit procedure to test if it has taken effect.\n\n**Enable NAT or Configure Private Google Access to allow Access to Public Update Hosting**\n\nFor the sake of brevity, please see the attached resources to enable NAT or Private Google Access. Rerun the audit procedure to test if it has taken effect.\n\n**From Command Line**\n\n**Install OS Config API for the Project**\n\n1. In each project you wish to audit, run ```gcloud services enable osconfig.googleapis.com```\n\n**Install and Start the Local OSConfig for Data Parsing**\n\nPlease view the reference of manage-os#agent-install to view specific operating system commands.\n\n**Set up a project-wide Service Account**\n\nPlease view Recommendation 4.1 to see how to set up a service account. Rerun the audit procedure to test if it has taken effect.\n\n**Enable NAT or Configure Private Google Access to allow Access to Public Update Hosting**\n\nFor the sake of brevity, please see the attached resources to enable NAT or Private Google Access. Rerun the audit procedure to test if it has taken effect.\n\n**Determine if Instances can connect to public update hosting**\n\nLinux \n\nDebian Based Operating Systems\n\n```\nsudo apt update\n```\nThe output should have a numbered list of lines with Hit: URL of updates.\n\nRedhat Based Operating Systems\n```\nyum check-update\n```\nThe output should show a list of packages that have updates available.\n\nWindows\n\n```\nping windowsupdate.microsoft.com\n```\nThe ping should successfully be delivered and received.", + "AuditProcedure": "**From Google Cloud Console**\n\n**Determine if OS Config API is Enabled for the Project**\n\n1. Navigate into a project. In the expanded navigation menu located at the top left of the screen, hover over `APIs & Services`. Then in the menu to the right of it, select `API Libraries`.\n2. Search for \"VM Manager (OS Config API)\", or scroll down in the left-hand column and select the filter labeled \"Compute\", where it is the last listed. Open this API.\n3. 
Verify the blue button at the top is enabled.\n\n**Determine if VM Instances have correct metadata tags for OSConfig parsing**\n\n1. From the main Google Cloud console, open the hamburger menu in the top left. Mouse over Compute Engine to expand the menu next to it.\n2. Under the \"Settings\" heading, select \"Metadata\".\n3. In this view there will be a list of the project-wide metadata tags for VMs. Determine if the tag \"enable-osconfig\" is set to \"true\".\n\n**Determine if the Operating System of VM Instances has the local OS-Config Agent running**\n\nThere is no way to determine this from the Google Cloud console. The only way is to run operating-system-specific commands locally inside the operating system via remote connection. For the sake of brevity of this recommendation, please view the docs/troubleshooting/vm-manager/verify-setup reference at the bottom of the page. If you initialized your VM instance with a Google-supplied OS image with a build date later than v20200114, it will have the service installed. You should still determine its status for proper operation.\n\n**Verify the service account you have set up for the project in Recommendation 4.1 is running**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n2. Click on each instance name to go to its `VM instance details` page.\n3. Under the section `Service Account`, take note of the service account.\n4. Run the commands locally for your operating system that are located at the docs/troubleshooting/vm-manager/verify-setup#service-account-enabled reference at the bottom of this page. They should return the name of your service account.\n\n**Determine if Instances can connect to public update hosting**\n\nEach type of operating system has its own update process. You will need to determine on each operating system that it can reach the update servers via its network connection. The VM Manager doesn't host the updates; it only allows you to centrally issue a command to each VM to update.\n\n**Determine if OS Config API is Enabled for the Project**\n\n1. In each project you wish to audit, run the following command:\n\n ```gcloud services list```\n\n2. If `osconfig.googleapis.com` is in the left-hand column, it is enabled for this project.\n\n**Determine if VM Manager is Enabled for the Project**\n\n1. Within the project, run the following command:\n```\ngcloud compute instances os-inventory describe VM-NAME \\\n --zone=ZONE\n```\nThe output will look like:\n```\nINSTANCE_ID INSTANCE_NAME OS OSCONFIG_AGENT_VERSION UPDATE_TIME\n29255009728795105 centos7 CentOS Linux 7 (Core) 20210217.00-g1.el7 2021-04-12T22:19:36.559Z\n5138980234596718741 rhel-8 Red Hat Enterprise Linux 8.3 (Ootpa) 20210316.00-g1.el8 2021-09-16T17:19:24Z\n7127836223366142250 windows Microsoft Windows Server 2019 Datacenter 20210316.00.0+win@1 2021-09-16T17:13:18Z\n```\n\n**Determine if VM Instances have correct metadata tags for OSConfig parsing**\n\n1. Select the project you want to view tagging in.\n\n**From Google Cloud Console**\n\n1. From the main Google Cloud console, open the hamburger menu in the top left. Mouse over Compute Engine to expand the menu next to it.\n2. Under the \"Settings\" heading, select \"Metadata\".\n3. In this view there will be a list of the project-wide metadata tags for VMs. 
Verify a tag of \"enable-osconfig\" is in this list and it is set to \"true\".\n\n**From Command Line**\n\nRun the following command to view instance data:\n```\ngcloud compute instances list --format=\"table(name,status,tags.list())\"\n```\nEach instance should have a tag of \"enable-osconfig\" set to \"true\".\n\n**Determine if the Operating System of VM Instances has the local OS-Config Agent running**\n\nThere is no way to determine this from the Google Cloud CLI. The best way is to run the commands inside the operating system located at 'Check OS-Config agent is installed and running' at the /docs/troubleshooting/vm-manager/verify-setup reference at the bottom of the page. If you initialized your VM instance with a Google-supplied OS image with a build date later than v20200114, it will have the service installed. You should still determine its status.\n\n**Verify the service account you have set up for the project in Recommendation 4.1 is running**\n\n1. Go to the `VM instances` page by visiting: https://console.cloud.google.com/compute/instances(https://console.cloud.google.com/compute/instances).\n2. Click on each instance name to go to its `VM instance details` page.\n3. Under the section `Service Account`, take note of the service account.\n4. View the compute/docs/troubleshooting/vm-manager/verify-setup#service-account-enabled resource at the bottom of the page for operating-system-specific commands to run locally.\n\n**Determine if Instances can connect to public update hosting**\n\nLinux \nDebian Based Operating Systems\n```\nsudo apt update\n```\nThe output should have a numbered list of lines with Hit: URL of updates.\n\nRedhat Based Operating Systems\n```\nyum check-update\n```\nThe output should show a list of packages that have updates available.\n\nWindows\n\n```\nping windowsupdate.microsoft.com\n```\nThe ping should successfully be delivered and received.", + "AdditionalInformation": "This is not your only solution to handle updates. This is a Google Cloud-specific recommendation to leverage a resource to solve the need for comprehensive update procedures and policy. If you have a solution already in place, you do not need to make the switch.\n\nThere are also further resources that would be out of the scope of this recommendation. 
If you need to allow your VMs to access publicly hosted updates, please see the reference to set up NAT or Private Google Access.", + "References": "https://cloud.google.com/compute/docs/manage-os:https://cloud.google.com/compute/docs/os-patch-management:https://cloud.google.com/compute/docs/vm-manager:https://cloud.google.com/compute/docs/images/os-details#vm-manager:https://cloud.google.com/compute/docs/vm-manager#pricing:https://cloud.google.com/compute/docs/troubleshooting/vm-manager/verify-setup:https://cloud.google.com/compute/docs/instances/view-os-details#view-data-tools:https://cloud.google.com/compute/docs/os-patch-management/create-patch-job:https://cloud.google.com/nat/docs/set-up-network-address-translation:https://cloud.google.com/vpc/docs/configure-private-google-access:https://workbench.cisecurity.org/sections/811638/recommendations/1334335:https://cloud.google.com/compute/docs/manage-os#agent-install:https://cloud.google.com/compute/docs/troubleshooting/vm-manager/verify-setup#service-account-enabled:https://cloud.google.com/compute/docs/os-patch-management#use-dashboard:https://cloud.google.com/compute/docs/troubleshooting/vm-manager/verify-setup#metadata-enabled" + } + ] + }, + { + "Id": "5.1", + "Description": "It is recommended that the IAM policy on a Cloud Storage bucket does not allow anonymous or public access.", + "Checks": [ + "cloudstorage_bucket_public_access" + ], + "Attributes": [ + { + "Section": "5. Storage", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended that the IAM policy on a Cloud Storage bucket does not allow anonymous or public access.", + "RationaleStatement": "Allowing anonymous or public access grants permissions to anyone to access bucket content. Such access might not be desired if you are storing any sensitive data. Hence, ensure that anonymous or public access to a bucket is not allowed.", + "ImpactStatement": "No storage buckets would be publicly accessible. You would have to explicitly administer bucket access.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `Storage browser` by visiting https://console.cloud.google.com/storage/browser(https://console.cloud.google.com/storage/browser).\n2. Click on the bucket name to go to its `Bucket details` page.\n3. Click on the `Permissions` tab. \n4. Click the `Delete` button in front of `allUsers` and `allAuthenticatedUsers` to remove that particular role assignment.\n\n**From Google Cloud CLI**\n\nRemove `allUsers` and `allAuthenticatedUsers` access.\n```\ngsutil iam ch -d allUsers gs://BUCKET_NAME\ngsutil iam ch -d allAuthenticatedUsers gs://BUCKET_NAME\n```\n\n**Prevention:**\n\nYou can prevent Storage buckets from becoming publicly accessible by setting up the `Domain restricted sharing` organization policy at: https://console.cloud.google.com/iam-admin/orgpolicies/iam-allowedPolicyMemberDomains (https://console.cloud.google.com/iam-admin/orgpolicies/iam-allowedPolicyMemberDomains).", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `Storage browser` by visiting https://console.cloud.google.com/storage/browser(https://console.cloud.google.com/storage/browser).\n2. Click on each bucket name to go to its `Bucket details` page.\n3. Click on the `Permissions` tab.\n4. Ensure that `allUsers` and `allAuthenticatedUsers` are not in the `Members` list.\n\n**From Google Cloud CLI**\n\n1. List all buckets in a project:\n\n```\ngsutil ls\n```\n\n2. 
Check the IAM policy for each bucket:\n\n```\ngsutil iam get gs://BUCKET_NAME\n```\n\nNo role should contain `allUsers` and/or `allAuthenticatedUsers` as a member.\n\n**Using REST API**\n\n1. List all buckets in a project:\n\n```\nGET https://www.googleapis.com/storage/v1/b?project=PROJECT_ID\n```\n\n2. Check the IAM policy for each bucket:\n\n```\nGET https://www.googleapis.com/storage/v1/b/BUCKET_NAME/iam\n```\n\nNo role should contain `allUsers` and/or `allAuthenticatedUsers` as a member.", + "AdditionalInformation": "To implement access restrictions on buckets, configuring bucket IAM is the preferred way over configuring bucket ACLs. In the GCP console, \"Edit Permissions\" for a bucket exposes IAM configurations only. Bucket ACLs are configured automatically as needed in order to implement/support the user-enforced bucket IAM policy. In case an administrator changes a bucket ACL using the command line (gsutil) or the API, the bucket IAM policy also gets updated automatically.", + "References": "https://cloud.google.com/storage/docs/access-control/iam-reference:https://cloud.google.com/storage/docs/access-control/making-data-public:https://cloud.google.com/storage/docs/gsutil/commands/iam" + } + ] + }, + { + "Id": "5.2", + "Description": "It is recommended that uniform bucket-level access is enabled on Cloud Storage buckets.", + "Checks": [ + "cloudstorage_bucket_uniform_bucket_level_access" + ], + "Attributes": [ + { + "Section": "5. Storage", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "It is recommended that uniform bucket-level access is enabled on Cloud Storage buckets.", + "RationaleStatement": "It is recommended to use uniform bucket-level access to unify and simplify how you grant access to your Cloud Storage resources. \n\nCloud Storage offers two systems for granting users permission to access your buckets and objects: Cloud Identity and Access Management (Cloud IAM) and Access Control Lists (ACLs). These systems act in parallel - in order for a user to access a Cloud Storage resource, only one of the systems needs to grant the user permission. Cloud IAM is used throughout Google Cloud and allows you to grant a variety of permissions at the bucket and project levels. ACLs are used only by Cloud Storage and have limited permission options, but they allow you to grant permissions on a per-object basis.\n\nIn order to support a uniform permissioning system, Cloud Storage has uniform bucket-level access. Using this feature disables ACLs for all Cloud Storage resources: access to Cloud Storage resources then is granted exclusively through Cloud IAM. Enabling uniform bucket-level access guarantees that if a Storage bucket is not publicly accessible, no object in the bucket is publicly accessible either.", + "ImpactStatement": "If you enable uniform bucket-level access, you revoke access from users who gain their access solely through object ACLs.\n\nCertain Google Cloud services, such as Stackdriver, Cloud Audit Logs, and Datastore, cannot export to Cloud Storage buckets that have uniform bucket-level access enabled.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Open the Cloud Storage browser in the Google Cloud Console by visiting: https://console.cloud.google.com/storage/browser(https://console.cloud.google.com/storage/browser)\n\n2. In the list of buckets, click on the name of the desired bucket.\n\n3. Select the `Permissions` tab near the top of the page.\n\n4. In the text box that starts with `This bucket uses fine-grained access control...`, click `Edit`.\n\n5. 
In the pop-up menu that appears, select `Uniform`.\n\n6. Click `Save`.\n\n**From Google Cloud CLI**\n\nUse the `on` option with the `uniformbucketlevelaccess set` command:\n\n```\ngsutil uniformbucketlevelaccess set on gs://BUCKET_NAME/\n```\n\n**Prevention**\n\nYou can set up an Organization Policy to enforce that any new bucket has uniform bucket-level access enabled. Learn more at:\nhttps://cloud.google.com/storage/docs/setting-org-policies#uniform-bucket(https://cloud.google.com/storage/docs/setting-org-policies#uniform-bucket)", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Open the Cloud Storage browser in the Google Cloud Console by visiting: https://console.cloud.google.com/storage/browser(https://console.cloud.google.com/storage/browser)\n\n2. For each bucket, make sure that the `Access control` column has the value `Uniform`.\n\n**From Google Cloud CLI**\n\n1. List all buckets in a project:\n```\ngsutil ls\n```\n2. For each bucket, verify that uniform bucket-level access is enabled.\n```\ngsutil uniformbucketlevelaccess get gs://BUCKET_NAME/\n```\nIf uniform bucket-level access is enabled, the response looks like:\n\n```\nUniform bucket-level access setting for gs://BUCKET_NAME/:\n Enabled: True\n LockedTime: LOCK_DATE\n```", + "AdditionalInformation": "Uniform bucket-level access can no longer be disabled if it has been active on a bucket for 90 consecutive days.", + "References": "https://cloud.google.com/storage/docs/uniform-bucket-level-access:https://cloud.google.com/storage/docs/using-uniform-bucket-level-access:https://cloud.google.com/storage/docs/setting-org-policies#uniform-bucket" + } + ] + }, + { + "Id": "6.7", + "Description": "It is recommended to have all SQL database instances set to enable automated backups.", + "Checks": [ + "cloudsql_instance_automated_backups" + ], + "Attributes": [ + { + "Section": "6. Cloud SQL Database Services", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to have all SQL database instances set to enable automated backups.", + "RationaleStatement": "Backups provide a way to restore a Cloud SQL instance to recover lost data or recover from a problem with that instance. Automated backups need to be set for any instance that contains data that should be protected from loss or damage. This recommendation is applicable for SQL Server, PostgreSQL, MySQL generation 1, and MySQL generation 2 instances.", + "ImpactStatement": "Automated backups will increase the required storage size and the costs associated with it.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance where the backups need to be configured.\n3. Click `Edit`.\n4. In the `Backups` section, check `Enable automated backups`, and choose a backup window.\n5. Click `Save`.\n\n**From Google Cloud CLI**\n\n1. List all Cloud SQL database instances using the following command:\n```\ngcloud sql instances list\n```\n\n2. Enable `Automated backups` for every Cloud SQL database instance using the below command:\n```\ngcloud sql instances patch INSTANCE_NAME --backup-start-time HH:MM\n```\nThe `backup-start-time` parameter is specified in 24-hour time, in the UTC±00 time zone, and specifies the start of a 4-hour backup window. Backups can start any time during the backup window.", + "AuditProcedure": "**From Google Cloud Console**\n\n1. 
Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Click the instance name to open its instance details page.\n3. Go to the `Backups` menu.\n4. Ensure that `Automated backups` is set to `Enabled` and `Backup time` is mentioned.\n\n**From Google Cloud CLI**\n\n1. List all Cloud SQL database instances using the following command:\n```\ngcloud sql instances list\n```\n\n2. Ensure that the below command returns `True` for every Cloud SQL database instance.\n```\ngcloud sql instances describe INSTANCE_NAME --format=\"value(settings.backupConfiguration.enabled)\"\n```", + "AdditionalInformation": "", + "References": "https://cloud.google.com/sql/docs/mysql/backup-recovery/backups:https://cloud.google.com/sql/docs/postgres/backup-recovery/backing-up" + } + ] + }, + { + "Id": "6.6", + "Description": "It is recommended to configure Second Generation SQL instances to use private IPs instead of public IPs.", + "Checks": [ + "cloudsql_instance_public_ip" + ], + "Attributes": [ + { + "Section": "6. Cloud SQL Database Services", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "It is recommended to configure Second Generation SQL instances to use private IPs instead of public IPs.", + "RationaleStatement": "To lower the organization's attack surface, Cloud SQL databases should not have public IPs. Private IPs provide improved network security and lower latency for your application.", + "ImpactStatement": "Removing the public IP address on SQL instances may break some applications that relied on it for database connectivity.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console: https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances)\n2. Click the instance name to open its Instance details page.\n3. Select the `Connections` tab.\n4. Deselect the `Public IP` checkbox.\n5. Click `Save` to update the instance.\n\n**From Google Cloud CLI**\n\n1. For every instance, remove its public IP and assign a private IP instead:\n```\ngcloud sql instances patch INSTANCE_NAME --network=VPC_NETWORK_NAME --no-assign-ip\n```\n\n2. Confirm the changes using the following command:\n```\ngcloud sql instances describe INSTANCE_NAME\n```\n\n**Prevention:**\n\nTo prevent new SQL instances from getting configured with public IP addresses, set up a `Restrict Public IP access on Cloud SQL instances` Organization policy at: https://console.cloud.google.com/iam-admin/orgpolicies/sql-restrictPublicIp(https://console.cloud.google.com/iam-admin/orgpolicies/sql-restrictPublicIp).", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console: https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances)\n\n2. Ensure that every instance has a private IP address and no public IP address configured.\n\n**From Google Cloud CLI**\n\n1. List all Cloud SQL database instances using the following command:\n\n```\ngcloud sql instances list\n```\n\n2. For every instance of type `instanceType: CLOUD_SQL_INSTANCE` with `backendType: SECOND_GEN`, get detailed configuration. Ignore instances of type `READ_REPLICA_INSTANCE` because these instances inherit their settings from the primary instance. Also, note that first generation instances cannot be configured to have a private IP address.\n\n```\ngcloud sql instances describe INSTANCE_NAME\n```\n\n3. 
Ensure that the setting `ipAddresses` has an IP address configured of `type: PRIVATE` and has no IP address of `type: PRIMARY`. `PRIMARY` IP addresses are public addresses. An instance can have both a private and public address at the same time. Note also that you cannot use private IP with First Generation instances.", + "AdditionalInformation": "Replicas inherit their private IP status from their primary instance. You cannot configure a private IP directly on a replica.", + "References": "https://cloud.google.com/sql/docs/mysql/configure-private-ip:https://cloud.google.com/sql/docs/mysql/private-ip:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints:https://console.cloud.google.com/iam-admin/orgpolicies/sql-restrictPublicIp" + } + ] + }, + { + "Id": "6.5", + "Description": "The database server should accept connections only from trusted network(s)/IP(s) and restrict access from public IP addresses.", + "Checks": [ + "cloudsql_instance_public_access" + ], + "Attributes": [ + { + "Section": "6. Cloud SQL Database Services", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "The database server should accept connections only from trusted network(s)/IP(s) and restrict access from public IP addresses.", + "RationaleStatement": "To minimize the attack surface of a database server instance, only trusted/known and required IP(s) should be white-listed to connect to it.\n\nAn authorized network should not have IPs/networks configured to `0.0.0.0/0`, which would allow access to the instance from anywhere in the world. Note that authorized networks apply only to instances with public IPs.", + "ImpactStatement": "The Cloud SQL database instance would not be available to public IP addresses.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n\n2. Click the instance name to open its `Instance details` page.\n3. Under the `Configuration` section, click `Edit configurations`.\n4. Under `Configuration options`, expand the `Connectivity` section.\n5. Click the `delete` icon for the authorized network `0.0.0.0/0`.\n6. Click `Save` to update the instance.\n\n**From Google Cloud CLI**\n\nUpdate the authorized network list, dropping the `0.0.0.0/0` entry and keeping only required trusted addresses:\n\n```\ngcloud sql instances patch INSTANCE_NAME --authorized-networks=IP_ADDR1,IP_ADDR2...\n```\n\n**Prevention:**\n\nTo prevent new SQL instances from being configured to accept incoming connections from any IP addresses, set up a `Restrict Authorized Networks on Cloud SQL instances` Organization Policy at: https://console.cloud.google.com/iam-admin/orgpolicies/sql-restrictAuthorizedNetworks(https://console.cloud.google.com/iam-admin/orgpolicies/sql-restrictAuthorizedNetworks).", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Click the instance name to open its `Instance details` page.\n3. Under the `Configuration` section, click `Edit configurations`.\n4. Under `Configuration options`, expand the `Connectivity` section.\n5. Ensure that no authorized network is configured to allow `0.0.0.0/0`.\n\n**From Google Cloud CLI**\n\n1. 
Get detailed configuration for every Cloud SQL database instance.\n\n```\ngcloud sql instances list --format=json\n```\n\nEnsure that the section `settings: ipConfiguration: authorizedNetworks` does not have any parameter `value` containing `0.0.0.0/0`.", + "AdditionalInformation": "There is no IPv6 configuration found for Google Cloud SQL services.", + "References": "https://cloud.google.com/sql/docs/mysql/configure-ip:https://console.cloud.google.com/iam-admin/orgpolicies/sql-restrictAuthorizedNetworks:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints:https://cloud.google.com/sql/docs/mysql/connection-org-policy" + } + ] + }, + { + "Id": "6.4", + "Description": "It is recommended to enforce all incoming connections to the SQL database instance to use SSL.", + "Checks": [ + "cloudsql_instance_ssl_connections" + ], + "Attributes": [ + { + "Section": "6. Cloud SQL Database Services", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to enforce all incoming connections to the SQL database instance to use SSL.", + "RationaleStatement": "SQL database connections, if successfully intercepted (MITM), can reveal sensitive data like credentials, database queries, query outputs, etc.\nFor security, it is recommended to always use SSL encryption when connecting to your instance.\nThis recommendation is applicable for PostgreSQL, MySQL generation 1, MySQL generation 2, and SQL Server 2017 instances.", + "ImpactStatement": "After enforcing SSL connections, existing clients will not be able to communicate with the SQL server unless configured with appropriate client certificates to communicate with the SQL database instance.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n\n2. Click on an instance name to see its configuration overview.\n\n3. In the left-side panel, select `Connections`.\n\n4. In the `SSL connections` section, click `Allow only SSL connections`.\n\n5. Under `Configure SSL server certificates` click `Create new certificate`.\n\n6. Under `Configure SSL client certificates` click `Create a client certificate`. \n\n7. Follow the instructions shown to learn how to connect to your instance. \n\n**From Google Cloud CLI**\n\nTo enforce SSL encryption for an instance, run the command:\n\n```\ngcloud sql instances patch INSTANCE_NAME --require-ssl\n```\n\nNote:\n`RESTART` is required for MySQL Generation 1 instances (`backendType: FIRST_GEN`) for this configuration to take effect.", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n\n2. Click on an instance name to see its configuration overview.\n\n3. In the left-side panel, select `Connections`.\n\n4. In the `SSL connections` section, ensure that `Only secured connections are allowed to connect to this instance.`.\n\n**From Google Cloud CLI**\n\n1. Get the detailed configuration for every SQL database instance using the following command:\n\n```\ngcloud sql instances list --format=json\n```\n\nEnsure that the section `settings: ipConfiguration` has the parameter `requireSsl` set to `true`.", + "AdditionalInformation": "By default `Settings: ipConfiguration` has no `authorizedNetworks` set/configured. 
In that case, even if by default `requireSsl` is not set (which is equivalent to `requireSsl:false`), there is no risk, as the instance cannot be accessed from outside the network unless `authorizedNetworks` are configured. However, if the default for `requireSsl` is not updated to `true`, any `authorizedNetworks` created later on will not enforce SSL-only connections.", + "References": "https://cloud.google.com/sql/docs/postgres/configure-ssl-instance/" + } + ] + }, + { + "Id": "6.1.1", + "Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances.\n\nThis recommendation is applicable only for MySQL instances. PostgreSQL does not offer any setting for No Password from the cloud console.", + "Checks": [], + "Attributes": [ + { + "Section": "6.1. MySQL Database", + "Profile": "Level 1", + "AssessmentStatus": "Manual", + "Description": "It is recommended to set a password for the administrative user (`root` by default) to prevent unauthorized access to the SQL database instances.\n\nThis recommendation is applicable only for MySQL instances. PostgreSQL does not offer any setting for No Password from the cloud console.", + "RationaleStatement": "At the time of MySQL instance creation, not providing an administrative password allows anyone to connect to the SQL database instance with administrative privileges. The root password should be set to ensure only authorized users have these privileges.", + "ImpactStatement": "Connection strings for administrative clients need to be reconfigured to use a password.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Platform Console using `https://console.cloud.google.com/sql/`\n\n2. Select the instance to open its Overview page.\n\n3. Select `Access Control > Users`.\n\n4. Click the `More actions icon` for the user to be updated.\n\n5. Select `Change password`, specify a `New password`, and click `OK`.\n\n**From Google Cloud CLI**\n\n1. Set a password for a MySQL instance:\n\n```\ngcloud sql users set-password root --host=HOST --instance=INSTANCE_NAME --prompt-for-password\n```\n\n2. A prompt will appear, requiring the user to enter a password:\n\n```\nInstance Password:\n```\n\n3. With a successful password configured, the following message should be seen:\n\n```\nUpdating Cloud SQL user...done.\n```", + "AuditProcedure": "**From Google Cloud CLI**\n\n1. List all SQL database instances of type MySQL:\n\n```\ngcloud sql instances list --filter='DATABASE_VERSION:MYSQL*' --project PROJECT_ID --format=\"(NAME,PRIMARY_ADDRESS)\"\n```\n\n2. For every MySQL instance, try to connect using the `PRIMARY_ADDRESS`, if available:\n\n```\nmysql -u root -h PRIMARY_ADDRESS\n```\n\nThe command should return either an error message or a password prompt.\n\nSample Error message:\n\n```\nERROR 1045 (28000): Access denied for user 'root'@'' (using password: NO)\n```\n\nIf a command produces the `mysql>` prompt, the MySQL instance allows anyone to connect with administrative privileges without needing a password.\n\n**Note:** The `No Password` setting is exposed only at the time of MySQL instance creation. 
Once the instance is created, the Google Cloud Platform Console does not expose the setting, so there is no way to confirm whether a password for an administrative user is set for a MySQL instance.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/sql/docs/mysql/create-manage-users:https://cloud.google.com/sql/docs/mysql/create-instance" + } + ] + }, + { + "Id": "6.1.2", + "Description": "It is recommended to set the `skip_show_database` database flag for Cloud SQL MySQL instances to `on`.", + "Checks": [ + "cloudsql_instance_mysql_skip_show_database_flag" + ], + "Attributes": [ + { + "Section": "6.1. MySQL Database", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to set the `skip_show_database` database flag for Cloud SQL MySQL instances to `on`.", + "RationaleStatement": "The `skip_show_database` database flag prevents people from using the SHOW DATABASES statement if they do not have the SHOW DATABASES privilege. This can improve security if you have concerns about users being able to see databases belonging to other users. Its effect depends on the SHOW DATABASES privilege: If the variable value is ON, the SHOW DATABASES statement is permitted only to users who have the SHOW DATABASES privilege, and the statement displays all database names. If the value is OFF, SHOW DATABASES is permitted to all users, but displays the names of only those databases for which the user has the SHOW DATABASES or other privilege. This recommendation is applicable to MySQL database instances.", + "ImpactStatement": "", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the MySQL instance for which you want to enable the database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `skip_show_database` from the drop-down menu, and set its value to `on`.\n6. Click `Save` to save your changes.\n7. Confirm your changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. List all Cloud SQL database instances:\n```\ngcloud sql instances list\n```\n2. Configure the `skip_show_database` database flag for every Cloud SQL MySQL database instance using the below command:\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags skip_show_database=on\n```\n\n```\nNote: \n\nThis command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags you want set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page.\n3. Ensure the database flag `skip_show_database` that has been set is listed under the `Database flags` section.\n\n**From Google Cloud CLI**\n\n1. List all Cloud SQL database instances:\n```\ngcloud sql instances list\n```\n2. 
Ensure the below command returns `on` for every Cloud SQL MySQL database instance:\n```\ngcloud sql instances describe INSTANCE_NAME --format=json | jq '.settings.databaseFlags[] | select(.name==\"skip_show_database\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require \nyour instance to be restarted. Check the list of supported flags - \nhttps://cloud.google.com/sql/docs/mysql/flags - to see if your \ninstance will be restarted when this patch is submitted.\n```\n\n```\nNote: some database flag settings can affect instance availability or stability, and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n```\n\n```\nNote: Configuring the above flag restarts the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/mysql/flags:https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_skip_show_database" + } + ] + }, + { + "Id": "6.1.3", + "Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.", + "Checks": [ + "cloudsql_instance_mysql_local_infile_flag" + ], + "Attributes": [ + { + "Section": "6.1. MySQL Database", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to set the `local_infile` database flag for a Cloud SQL MySQL instance to `off`.", + "RationaleStatement": "The `local_infile` flag controls the server-side LOCAL capability for LOAD DATA statements. Depending on the `local_infile` setting, the server refuses or permits local data loading by clients that have LOCAL enabled on the client side.\n\nTo explicitly cause the server to refuse LOAD DATA LOCAL statements (regardless of how client programs and libraries are configured at build time or runtime), start mysqld with local_infile disabled. local_infile can also be set at runtime.\n\nDue to security issues associated with the `local_infile` flag, it is recommended to disable it. This recommendation is applicable to MySQL database instances.", + "ImpactStatement": "Disabling `local_infile` makes the server refuse local data loading by clients that have LOCAL enabled on the client side.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the MySQL instance where the database flag needs to be set.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `local_infile` from the drop-down menu, and set its value to `off`.\n6. Click `Save`.\n7. Confirm the changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. List all Cloud SQL database instances using the following command:\n```\ngcloud sql instances list\n```\n2. Configure the `local_infile` database flag for every Cloud SQL MySQL database instance using the below command:\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags local_infile=off\n```\n\n```\nNote: \n\nThis command will overwrite all database flags that were previously set. To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. 
For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page.\n3. Ensure the database flag `local_infile` that has been set is listed under the `Database flags` section.\n\n**From Google Cloud CLI**\n\n1. List all Cloud SQL database instances:\n```\ngcloud sql instances list\n```\n2. Ensure the below command returns `off` for every Cloud SQL MySQL database instance.\n```\ngcloud sql instances describe INSTANCE_NAME --format=json | jq '.settings.databaseFlags[] | select(.name==\"local_infile\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require \nthe instance to be restarted. Check the list of supported flags - \nhttps://cloud.google.com/sql/docs/mysql/flags - to see if your instance will be restarted when this patch is submitted.\n```\n\n```\nNote: some database flag settings can affect instance availability or stability, and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n```\n\n```\nNote: Configuring the above flag restarts the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/mysql/flags:https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile:https://dev.mysql.com/doc/refman/5.7/en/load-data-local.html" + } + ] + }, + { + "Id": "6.2.1", + "Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are:\n- `TERSE`\n- `DEFAULT`\n- `VERBOSE`\n\n`TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information.\n\n`VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error.\n\nEnsure an appropriate value of `DEFAULT` or stricter is set.", + "Checks": [ + "cloudsql_instance_postgres_log_error_verbosity_flag" + ], + "Attributes": [ + { + "Section": "6.2. PostgreSQL Database", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "The `log_error_verbosity` flag controls the verbosity/details of messages logged. Valid values are:\n- `TERSE`\n- `DEFAULT`\n- `VERBOSE`\n\n`TERSE` excludes the logging of `DETAIL`, `HINT`, `QUERY`, and `CONTEXT` error information.\n\n`VERBOSE` output includes the `SQLSTATE` error code, source code file name, function name, and line number that generated the error.\n\nEnsure an appropriate value of `DEFAULT` or stricter is set.", + "RationaleStatement": "Auditing helps in troubleshooting operational problems and also permits forensic analysis. If `log_error_verbosity` is not set to the correct value, too many details or too few details may be logged. This flag should be configured with a value of `DEFAULT` or stricter. This recommendation is applicable to PostgreSQL database instances.", + "ImpactStatement": "Turning on logging will increase the required storage over time. Mismanaged logs may cause your storage costs to increase. Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts. 
Because of this, it is recommended you apply these flag changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances.\n2. Select the PostgreSQL instance for which you want to enable the database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `log_error_verbosity` from the drop-down menu and set an appropriate value.\n6. Click `Save` to save your changes.\n7. Confirm your changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. Configure the log_error_verbosity database flag for every Cloud SQL PostgreSQL database instance using the below command.\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags log_error_verbosity=DEFAULT\n```\n```\nNote: This command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags you want set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page\n3. Go to `Configuration` card\n4. Under `Database flags`, check that the value of the `log_error_verbosity` flag is set to 'DEFAULT' or stricter.\n\n**From Google Cloud CLI**\n\n1. Use the below command for every Cloud SQL PostgreSQL database instance to verify the value of `log_error_verbosity`\n```\ngcloud sql instances list --format=json | jq '.[].settings.databaseFlags[] | select(.name==\"log_error_verbosity\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require your instance to be restarted. Check the list of supported flags - https://cloud.google.com/sql/docs/postgres/flags - to see if your instance will be restarted when this patch is submitted.\n```\n```\nNote: some database flag settings can affect instance availability or stability and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n```\n```\nNote: Configuring the above flag does not require restarting the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/postgres/flags:https://www.postgresql.org/docs/current/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT" + } + ] + }, + { + "Id": "6.2.6", + "Description": "The `log_min_error_statement` flag defines the minimum message severity level that is considered an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.\nEach severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.", + "Checks": [ + "cloudsql_instance_postgres_log_min_error_statement_flag" + ], + "Attributes": [ + { + "Section": "6.2. 
PostgreSQL Database", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "The `log_min_error_statement` flag defines the minimum message severity level that is considered an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.\nEach severity level includes the subsequent levels mentioned above. Ensure a value of `ERROR` or stricter is set.", + "RationaleStatement": "Auditing helps in troubleshooting operational problems and also permits forensic analysis. If `log_min_error_statement` is not set to the correct value, messages may not be classified as error messages appropriately. Considering general log messages as error messages would make it difficult to find actual errors, while considering only stricter severity levels as error messages may cause the SQL statements of actual errors not to be logged.\nThe `log_min_error_statement` flag should be set to `ERROR` or stricter. This recommendation is applicable to PostgreSQL database instances.", + "ImpactStatement": "Turning on logging will increase the required storage over time. Mismanaged logs may cause your storage costs to increase. Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts. Because of this, it is recommended you apply these flag changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the PostgreSQL instance for which you want to enable the database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `log_min_error_statement` from the drop-down menu and set an appropriate value.\n6. Click `Save` to save your changes.\n7. Confirm your changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. Configure the `log_min_error_statement` database flag for every Cloud SQL PostgreSQL database instance using the below command.\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags log_min_error_statement=ERROR\n```\n```\nNote: This command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags you want set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page\n3. Go to `Configuration` card\n4. Under `Database flags`, check that the value of the `log_min_error_statement` flag is set to `ERROR` or stricter.\n\n**From Google Cloud CLI**\n\n1. Use the below command for every Cloud SQL PostgreSQL database instance to verify the value of `log_min_error_statement` is set to `ERROR` or stricter.\n```\ngcloud sql instances list --format=json | jq '.[].settings.databaseFlags[] | select(.name==\"log_min_error_statement\")|.value'\n```
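\n\nAn editor's sketch (not CIS text) that pairs each instance name with its flag value, so instances where the flag is unset are visible at a glance:\n```\n# Print \"<instance>: <value>\" per instance; \"not set\" when the flag is absent\ngcloud sql instances list --format=json | jq -r '.[] | \"\\(.name): \\([.settings.databaseFlags[]? | select(.name==\"log_min_error_statement\").value] | first // \"not set\")\"'\n```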
", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require your instance to be restarted. Check the list of supported flags - https://cloud.google.com/sql/docs/postgres/flags - to see if your instance will be restarted when this patch is submitted.\n```\n```\nNote: some database flag settings can affect instance availability or stability and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n```\n```\nNote: Configuring the above flag does not require restarting the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/postgres/flags:https://www.postgresql.org/docs/9.6/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHEN" + } + ] + }, + { + "Id": "6.2.4", + "Description": "The value of the `log_statement` flag determines which SQL statements are logged. Valid values are:\n- `none`\n- `ddl`\n- `mod`\n- `all`\n\nThe value `ddl` logs all data definition statements.\nThe value `mod` logs all ddl statements, plus data-modifying statements.\n\nStatements are logged after basic parsing is done and the statement type is determined, so statements with parse errors are not logged. When using the extended query protocol, logging occurs after an Execute message is received, and values of the Bind parameters are included.\n\nA value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.", + "Checks": [ + "cloudsql_instance_postgres_log_statement_flag" + ], + "Attributes": [ + { + "Section": "6.2. PostgreSQL Database", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "The value of the `log_statement` flag determines which SQL statements are logged. Valid values are:\n- `none`\n- `ddl`\n- `mod`\n- `all`\n\nThe value `ddl` logs all data definition statements.\nThe value `mod` logs all ddl statements, plus data-modifying statements.\n\nStatements are logged after basic parsing is done and the statement type is determined, so statements with parse errors are not logged. When using the extended query protocol, logging occurs after an Execute message is received, and values of the Bind parameters are included.\n\nA value of 'ddl' is recommended unless otherwise directed by your organization's logging policy.", + "RationaleStatement": "Auditing helps in forensic analysis. If `log_statement` is not set to the correct value, too many statements may be logged, leading to issues in finding the relevant information from the logs, or too few statements may be logged, with relevant information missing from the logs. Setting log_statement to align with your organization's security and logging policies facilitates later auditing and review of database activities.\nThis recommendation is applicable to PostgreSQL database instances.", + "ImpactStatement": "Turning on logging will increase the required storage over time. Mismanaged logs may cause your storage costs to increase. Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts.
Because of this, it is recommended you apply these flag changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the PostgreSQL instance for which you want to enable the database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `log_statement` from the drop-down menu and set an appropriate value.\n6. Click `Save` to save your changes.\n7. Confirm your changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. Configure the `log_statement` database flag for every Cloud SQL PostgreSQL database instance using the below command.\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags log_statement=ddl\n```\n```\nNote: This command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags you want set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page\n3. Go to `Configuration` card\n4. Under `Database flags`, check that the value of the `log_statement` flag is set appropriately.\n\n**From Google Cloud CLI**\n\n1. Use the below command for every Cloud SQL PostgreSQL database instance to verify the value of `log_statement`\n```\ngcloud sql instances list --format=json | jq '.[].settings.databaseFlags[] | select(.name==\"log_statement\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require your instance to be restarted. Check the list of supported flags - https://cloud.google.com/sql/docs/postgres/flags - to see if your instance will be restarted when this patch is submitted.\n```\n```\nNote: some database flag settings can affect instance availability or stability and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n```\n```\nNote: Configuring the above flag does not require restarting the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/postgres/flags:https://www.postgresql.org/docs/current/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT" + } + ] + }, + { + "Id": "6.2.9", + "Description": "Instance addresses can be public IP or private IP. Public IP means that the instance is accessible through the public internet. In contrast, instances using only private IP are not accessible through the public internet, but are accessible through a Virtual Private Cloud (VPC).\n\nLimiting network access to your database will limit potential attacks.", + "Checks": [ + "cloudsql_instance_private_ip_assignment" + ], + "Attributes": [ + { + "Section": "6.2. PostgreSQL Database", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Instance addresses can be public IP or private IP. Public IP means that the instance is accessible through the public internet.
In contrast, instances using only private IP are not accessible through the public internet, but are accessible through a Virtual Private Cloud (VPC).\n\nLimiting network access to your database will limit potential attacks.", + "RationaleStatement": "Restricting database access to private IP only reduces the attack surface.", + "ImpactStatement": "If you set a database's IP to private, only hosts on the same network will be able to connect to the database.\n\nConfiguring an existing Cloud SQL instance to use private IP causes the instance to restart.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. In the Google Cloud console, go to the `Cloud SQL Instances` page.\n1. Open the `Overview page` of an instance by clicking the instance name.\n1. Select `Connections` from the SQL navigation menu.\n1. Check the `Private IP` checkbox. A drop-down list shows the available networks in your project.\n1. Select the VPC network you want to use:\n If you see `Private service connection required`:\n 1. Click `Set up connection`.\n 1. In the `Allocate an IP range` section, choose one of the following options:\n - Select one or more existing IP ranges or create a new one from the dropdown. The dropdown includes previously allocated ranges, if there are any, or you can select Allocate a new IP range and enter a new range and name.\n - Use an automatically allocated IP range in your network.\n Note: You can specify an address range only for a primary instance, not for a read replica or clone.\n 1. Click Continue.\n 1. Click Create connection.\n 1. Verify that you see the Private service connection for network VPC_NETWORK_NAME has been successfully created status.\n1. Optional step for Private Services Access - review reference links to VPC documents for additional detail. If you want to allow other Google Cloud services such as BigQuery to access data in Cloud SQL and make queries against this data over a private IP connection, then select the Private path for Google Cloud services check box.\n1. Click Save\n\n**From Google Cloud CLI**\n\n1. List cloud SQL instances\n```\ngcloud sql instances list --format=\"json\" | jq '.[] | .connectionName,.ipAddresses'\n```\nNote the `project name` of the instance you want to set to a private IP, this will be PROJECT_NAME\n\nNote the `instance name` of the instance you want to set to a private IP, this will be INSTANCE_NAME\n\nExample public instance output:\n\n```\n\"my-project-123456:us-central1:my-instance\"\n\n {\n \"ipAddress\": \"0.0.0.0\",\n \"type\": \"PRIMARY\"\n },\n {\n \"ipAddress\": \"0.0.0.0\",\n \"type\": \"OUTGOING\"\n }\n```\n\n2. Run the following command to list the available VPCs \n```\ngcloud compute networks list --format=\"json\" | jq '.[].name'\n```\nNote the name of the VPC to use for the instance private IP, this will be NETWORK_NAME\n\n3. Run the following to set the instance to a private IP\n```\ngcloud beta sql instances patch INSTANCE_NAME \\\n--project=PROJECT_NAME \\\n--network=projects/PROJECT_NAME/global/networks/NETWORK_NAME \\\n--no-assign-ip\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. In the Google Cloud console, go to the `Cloud SQL Instances` page.\n1. Open the `Overview page` of an instance by clicking the instance name.\n1. Look for a field labeled `Private IP address`. This field will only show if the Private IP option is checked. The IP listed should be in the private IP space.\n\n**From Google Cloud CLI**\n1. List cloud SQL instances\n```\ngcloud sql instances list --format=\"json\" | jq '.[] | .connectionName,.ipAddresses'
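\n# Editor's sketch (not CIS text): print only instances that still expose a public (PRIMARY) address\ngcloud sql instances list --format=\"json\" | jq -r '.[] | select(any(.ipAddresses[]?; .type==\"PRIMARY\")) | .connectionName'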
\n```\nEach instance listed should have a `type` of `PRIVATE`.\n\n2. If you want to view a specific instance, note the INSTANCE_NAME(s) listed and run the following.\n```\ngcloud sql instances describe INSTANCE_NAME --format=\"json\" | jq '.ipAddresses'\n```\n`Type` should be `\"PRIVATE\"`\n```\n {\n \"ipAddress\": \"10.21.0.2\",\n \"type\": \"PRIVATE\"\n }\n```", + "AdditionalInformation": "", + "References": "https://cloud.google.com/sql/docs/postgres/configure-private-ip:https://cloud.google.com/vpc/docs/configure-private-services-access#procedure:https://cloud.google.com/vpc/docs/configure-private-services-access#creating-connection" + } + ] + }, + { + "Id": "6.2.8", + "Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.", + "Checks": [ + "cloudsql_instance_postgres_enable_pgaudit_flag" + ], + "Attributes": [ + { + "Section": "6.2. PostgreSQL Database", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Ensure `cloudsql.enable_pgaudit` database flag for Cloud SQL PostgreSQL instance is set to `on` to allow for centralized logging.", + "RationaleStatement": "As numerous other recommendations in this section consist of turning on flags for logging purposes, your organization will need a way to manage these logs. You may have a solution already in place. If you do not, consider installing and enabling the open source pgaudit extension within PostgreSQL and enabling its corresponding flag of `cloudsql.enable_pgaudit`. Setting this flag and installing the extension enables database auditing in PostgreSQL through the open-source pgAudit extension. This extension provides detailed session and object logging to comply with government, financial, & ISO standards and provides auditing capabilities to mitigate threats by monitoring security events on the instance. Enabling the flag and settings later in this recommendation will send these logs to Google Logs Explorer so that you can access them in a central location. This recommendation is applicable only to PostgreSQL database instances.", + "ImpactStatement": "Enabling the pgAudit extension can lead to increased data storage requirements; to ensure durability of pgAudit log records in the event of unexpected storage issues, it is recommended to enable the `Enable automatic storage increases` setting on the instance. Enabling flags via the command line will also overwrite all existing flags, so you should apply all needed flags in the CLI command. Also, flags may require a restart of the server or may break existing functionality, so update your servers at a time of low usage.", + "RemediationProcedure": "**Initialize the pgAudit flag**\n\n**From Google Cloud Console**\n\n1. Go to https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Overview` page.\n3. Click `Edit`.\n4. Scroll down and expand `Flags`.\n5. To set a flag that has not been set on the instance before, click `Add item`.\n6. Enter `cloudsql.enable_pgaudit` for the flag name and set the flag to `on`.\n7. Click `Done`.\n8. Click `Save` to update the configuration.\n9. 
Confirm your changes under `Flags` on the `Overview` page.\n\n**From Google Cloud CLI**\n\nRun the below command, providing the INSTANCE_NAME, to enable the `cloudsql.enable_pgaudit` flag.\n\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags cloudsql.enable_pgaudit=on\n```\n\nNote: a `RESTART` is required for this configuration to take effect.\n\n**Creating the extension**\n\n1. Connect to the server running PostgreSQL directly or through a SQL client of your choice.\n2. If SSHed into the server, open the PostgreSQL shell from the command line by typing `psql`\n3. Run the following command as a superuser.\n\n```\nCREATE EXTENSION pgaudit;\n```\n\n**Updating the previously created pgaudit.log flag for your Logging Needs**\n\n**From Console:**\n\nNote: there are multiple options here. This command will enable logging for all databases on a server. Please see the customizing database audit logging reference for more flag options. \n\n1. Go to https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Overview` page.\n3. Click `Edit`.\n4. Scroll down and expand `Flags`.\n5. To set a flag that has not been set on the instance before, click `Add item`.\n6. Enter `pgaudit.log` for the flag name and set its value to `all`.\n7. Click `Done`.\n8. Click `Save` to update the configuration.\n9. Confirm your changes under `Flags` on the `Overview` page.\n\n**From Command Line:**\n\nRun the command\n\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags \\\n cloudsql.enable_pgaudit=on,pgaudit.log=all\n```\n\n**Determine if logs are being sent to Logs Explorer**\n\n1. From the Google Console home page, open the hamburger menu in the top left.\n2. In the menu that pops open, scroll down to Logs Explorer under Operations.\n3. In the query box, paste the following and search\n\nresource.type=\"cloudsql_database\"\nlogName=\"projects/PROJECT_ID/logs/cloudaudit.googleapis.com%2Fdata_access\"\nprotoPayload.request.@type=\"type.googleapis.com/google.cloud.sql.audit.v1.PgAuditEntry\"\n\n If it returns any log sources, they are correctly set up.", + "AuditProcedure": "**Determining if the pgAudit Flag is set to 'on'**\n\n**From Google Cloud Console**\n\n1. Go to https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Overview` page.\n3. Click `Edit`.\n4. Scroll down and expand `Flags`.\n5. Ensure that the `cloudsql.enable_pgaudit` flag is set to `on`.\n\n**From Google Cloud CLI**\n\nRun the command below, providing the INSTANCE_NAME. Ensure the value of the flag is `on`.\n\n```\ngcloud sql instances describe INSTANCE_NAME --format=\"json\" | jq '.settings.databaseFlags[]|select(.name==\"cloudsql.enable_pgaudit\")|.value'\n```\n\n**Determine if the pgAudit extension is installed**\n\n1. Connect to the server running PostgreSQL directly or through a SQL client of your choice.\n2. Via the command line, open the PostgreSQL shell by typing `psql`\n3. Run the following command\n\n```\nSELECT * \nFROM pg_extension;\n```\n\n4. Check whether pgAudit is in this list. If so, it is installed.\n\n**Determine if Data Access Audit logs are enabled for your project and have sufficient privileges**\n\n1. From the homepage open the hamburger menu in the top left.\n2. Scroll down to `IAM & Admin` and hover over it.\n3. In the menu that opens up, select `Audit Logs`\n4. In the middle of the page, in the search box next to `filter` search for `Cloud Composer API`\n5. Select it, and ensure that both 'Admin Read' and 'Data Read' are checked.
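\n\nAs an editor's sketch (not CIS text; PROJECT_ID is a placeholder), recent pgAudit entries can also be pulled from the CLI instead of the Logs Explorer UI:\n```\n# Read the five most recent pgAudit data-access log entries for the project\ngcloud logging read 'resource.type=\"cloudsql_database\" AND logName=\"projects/PROJECT_ID/logs/cloudaudit.googleapis.com%2Fdata_access\"' --project=PROJECT_ID --limit=5\n```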
\n\n**Determine if logs are being sent to Logs Explorer**\n\n1. From the Google Console home page, open the hamburger menu in the top left.\n2. In the menu that pops open, scroll down to Logs Explorer under Operations.\n3. In the query box, paste the following and search\n```\nresource.type=\"cloudsql_database\"\nlogName=\"projects/PROJECT_ID/logs/cloudaudit.googleapis.com%2Fdata_access\"\nprotoPayload.request.@type=\"type.googleapis.com/google.cloud.sql.audit.v1.PgAuditEntry\"\n```\n4. If it returns any log sources, they are correctly set up.", + "AdditionalInformation": "WARNING: This patch modifies database flag values, which may require your instance to be restarted. Check the list of supported flags - https://cloud.google.com/sql/docs/postgres/flags - to see if your instance will be restarted when this patch is submitted.\n\nNote: Configuring the 'cloudsql.enable_pgaudit' database flag requires restarting the Cloud SQL PostgreSQL instance.", + "References": "https://cloud.google.com/sql/docs/postgres/flags#list-flags-postgres:https://cloud.google.com/sql/docs/postgres/pg-audit#enable-auditing-flag:https://cloud.google.com/sql/docs/postgres/pg-audit#customizing-database-audit-logging:https://cloud.google.com/logging/docs/audit/configure-data-access#config-console-enable" + } + ] + }, + { + "Id": "6.2.2", + "Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.", + "Checks": [ + "cloudsql_instance_postgres_log_connections_flag" + ], + "Attributes": [ + { + "Section": "6.2. PostgreSQL Database", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Enabling the `log_connections` setting causes each attempted connection to the server to be logged, along with successful completion of client authentication. This parameter cannot be changed after the session starts.", + "RationaleStatement": "PostgreSQL does not log attempted connections by default. Enabling the `log_connections` setting will create log entries for each attempted connection as well as successful completion of client authentication which can be useful in troubleshooting issues and to determine any unusual connection attempts to the server. This recommendation is applicable to PostgreSQL database instances.", + "ImpactStatement": "Turning on logging will increase the required storage over time. Mismanaged logs may cause your storage costs to increase. Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts. Because of this, it is recommended you apply these flag changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances.\n2. Select the PostgreSQL instance for which you want to enable the database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `log_connections` from the drop-down menu and set the value as `on`.\n6. Click `Save`.\n7. Confirm the changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. 
Configure the `log_connections` database flag for every Cloud SQL PostgreSQL database instance using the below command.\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags log_connections=on\n```\n```\nNote: \nThis command will overwrite all previously set database flags. To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page.\n3. Go to the `Configuration` card.\n4. Under `Database flags`, check the value of the `log_connections` flag to determine if it is configured as expected.\n\n**From Google Cloud CLI**\n\n1. Ensure the below command returns `on` for every Cloud SQL PostgreSQL database instance:\n```\ngcloud sql instances list --format=json | jq '.[].settings.databaseFlags[] | select(.name==\"log_connections\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require your instance to be restarted. Check the list of supported flags - https://cloud.google.com/sql/docs/postgres/flags - to see if your instance will be restarted when this patch is submitted.\n```\n```\nNote: some database flag settings can affect instance availability or stability and remove the instance from the Cloud SQL SLA. For information about these flags, see the Operational Guidelines.\n```\n```\nNote: Configuring the above flag does not require restarting the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/postgres/flags:https://www.postgresql.org/docs/9.6/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT" + } + ] + }, + { + "Id": "6.2.3", + "Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.", + "Checks": [ + "cloudsql_instance_postgres_log_disconnections_flag" + ], + "Attributes": [ + { + "Section": "6.2. PostgreSQL Database", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Enabling the `log_disconnections` setting logs the end of each session, including the session duration.", + "RationaleStatement": "PostgreSQL does not log session details such as duration and session end by default. Enabling the `log_disconnections` setting will create log entries at the end of each session which can be useful in troubleshooting issues and determining any unusual activity across a time period.\nThe `log_disconnections` and `log_connections` settings work hand in hand and generally, the pair would be enabled/disabled together. This recommendation is applicable to PostgreSQL database instances.", + "ImpactStatement": "Turning on logging will increase the required storage over time. Mismanaged logs may cause your storage costs to increase. Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts. Because of this, it is recommended you apply these flag changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. 
Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the PostgreSQL instance where the database flag needs to be enabled.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `log_disconnections` from the drop-down menu and set the value as `on`.\n6. Click `Save`.\n7. Confirm the changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. Configure the `log_disconnections` database flag for every Cloud SQL PostgreSQL database instance using the below command:\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags log_disconnections=on\n```\n```\nNote: This command will overwrite all previously set database flags. To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page\n3. Go to the `Configuration` card.\n4. Under `Database flags`, check that the value of the `log_disconnections` flag is configured as expected.\n\n**From Google Cloud CLI**\n\n1. Ensure the below command returns `on` for every Cloud SQL PostgreSQL database instance:\n```\ngcloud sql instances list --format=json | jq '.[].settings.databaseFlags[] | select(.name==\"log_disconnections\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require your instance to be restarted. Check the list of supported flags - https://cloud.google.com/sql/docs/postgres/flags - to see if your instance will be restarted when this patch is submitted.\n```\n```\nNote: some database flag settings can affect instance availability or stability and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n```\n```\nNote: Configuring the above flag does not require restarting the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/postgres/flags:https://www.postgresql.org/docs/9.6/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT" + } + ] + }, + { + "Id": "6.2.7", + "Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.", + "Checks": [ + "cloudsql_instance_postgres_log_min_duration_statement_flag" + ], + "Attributes": [ + { + "Section": "6.2. PostgreSQL Database", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "The `log_min_duration_statement` flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that `log_min_duration_statement` is disabled, i.e., a value of `-1` is set.", + "RationaleStatement": "Logging SQL statements may include sensitive information that should not be recorded in logs.
This recommendation is applicable to PostgreSQL database instances.", + "ImpactStatement": "Turning on logging will increase the required storage over time. Mismanaged logs may cause your storage costs to increase. Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts. Because of this, it is recommended you apply these flag changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the PostgreSQL instance where the database flag needs to be enabled.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `log_min_duration_statement` from the drop-down menu and set a value of `-1`.\n6. Click `Save`.\n7. Confirm the changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. List all Cloud SQL database instances using the following command:\n```\ngcloud sql instances list\n```\n2. Configure the `log_min_duration_statement` flag for every Cloud SQL PostgreSQL database instance using the below command:\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags log_min_duration_statement=-1\n```\n```\nNote: This command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page.\n3. Go to the `Configuration` card.\n4. Under `Database flags`, check that the value of the `log_min_duration_statement` flag is set to `-1`.\n\n**From Google Cloud CLI**\n\n1. Use the below command for every Cloud SQL PostgreSQL database instance to verify the value of `log_min_duration_statement` is set to `-1`.\n```\ngcloud sql instances list --format=json | jq '.[].settings.databaseFlags[] | select(.name==\"log_min_duration_statement\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require your instance to be restarted. Check the list of supported flags - https://cloud.google.com/sql/docs/postgres/flags - to see if your instance will be restarted when this patch is submitted.\n```\n```\nNote: Some database flag settings can affect instance availability or stability and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n```\n```\nNote: Configuring the above flag does not require restarting the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/postgres/flags:https://www.postgresql.org/docs/current/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT" + } + ] + }, + { + "Id": "6.2.5", + "Description": "The `log_min_messages` flag defines the minimum message severity level that is considered an error statement.
Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.\nEach severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.", + "Checks": [ + "cloudsql_instance_postgres_log_min_messages_flag" + ], + "Attributes": [ + { + "Section": "6.2. PostgreSQL Database", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "The `log_min_messages` flag defines the minimum message severity level that is considered an error statement. Messages for error statements are logged with the SQL statement. Valid values include `DEBUG5`, `DEBUG4`, `DEBUG3`, `DEBUG2`, `DEBUG1`, `INFO`, `NOTICE`, `WARNING`, `ERROR`, `LOG`, `FATAL`, and `PANIC`.\nEach severity level includes the subsequent levels mentioned above. ERROR is considered the best practice setting. Changes should only be made in accordance with the organization's logging policy.", + "RationaleStatement": "Auditing helps in troubleshooting operational problems and also permits forensic analysis. If `log_min_messages` is not set to the correct value, messages may not be classified as error messages appropriately. An organization will need to decide its own threshold for the `log_min_messages` flag.\n\nThis recommendation is applicable to PostgreSQL database instances.", + "ImpactStatement": "Setting the threshold too low might result in increased log storage size and length, making it difficult to find actual errors. Setting the threshold to 'Warning' will log messages for the most needed error messages. Higher severity levels may cause errors needed to troubleshoot to not be logged.\n\nNote: To effectively turn off logging failing statements, set this parameter to PANIC.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances)\n2. Select the PostgreSQL instance for which you want to enable the database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `log_min_messages` from the drop-down menu and set an appropriate value.\n6. Click `Save` to save the changes.\n7. Confirm the changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. Configure the `log_min_messages` database flag for every Cloud SQL PostgreSQL database instance using the below command.\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags log_min_messages=VALUE\n```\n```\nNote: This command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page.\n3. Go to the `Configuration` card.\n4. 
Under `Database flags`, check that the value of the `log_min_messages` flag is in accordance with the organization's logging policy.\n\n**From Google Cloud CLI**\n\n1. Use the below command for every Cloud SQL PostgreSQL database instance to verify that the value of `log_min_messages` is in accordance with the organization's logging policy.\n```\ngcloud sql instances list --format=json | jq '.[].settings.databaseFlags[] | select(.name==\"log_min_messages\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require your instance to be restarted. Check the list of supported flags - https://cloud.google.com/sql/docs/postgres/flags - to see if your instance will be restarted when this patch is submitted.\n```\n```\nNote: Some database flag settings can affect instance availability or stability and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n```\n```\nNote: Configuring the above flag does not require restarting the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/postgres/flags:https://www.postgresql.org/docs/9.6/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHEN" + } + ] + }, + { + "Id": "6.3.6", + "Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.", + "Checks": [ + "cloudsql_instance_sqlserver_trace_flag" + ], + "Attributes": [ + { + "Section": "6.3. SQL Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to set `3625 (trace flag)` database flag for Cloud SQL SQL Server instance to `on`.", + "RationaleStatement": "Microsoft SQL Trace Flags are frequently used to diagnose performance issues or to debug stored procedures or complex computer systems, but they may also be recommended by Microsoft Support to address behavior that is negatively impacting a specific workload. All documented trace flags and those recommended by Microsoft Support are fully supported in a production environment when used as directed. Trace flag `3625` limits the amount of information returned to users who are not members of the sysadmin fixed server role by masking the parameters of some error messages using '******'. Setting this as a Google Cloud flag for the instance provides security through obscurity and prevents the disclosure of sensitive information; hence, it is recommended to set this flag globally to `on` to prevent it from being left off or changed by bad actors. This recommendation is applicable to SQL Server database instances.", + "ImpactStatement": "Changing flags on a database may cause it to be restarted. The best time to do this is at a time of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the SQL Server instance for which you want to enable the database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `3625` from the drop-down menu, and set its value to `on`.\n6. Click `Save` to save your changes.\n7. Confirm your changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. 
Configure the `3625` database flag for every Cloud SQL SQL Server database instance using the below command.\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags \"3625=on\"\n```\nNote : \n\nThis command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags you want set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page\n3. Ensure the database flag `3625` that has been set is listed under the `Database flags` section.\n\n**From Google Cloud CLI**\n\n1. Ensure the below command returns `on` for every Cloud SQL SQL Server database instance\n\n```\ngcloud sql instances list --format=json | jq '.[].settings.databaseFlags[] | select(.name==\"3625\")|.value'\n```
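\n\nEditor's sketch (not CIS text): the SQL Server flags covered in this section can also be reviewed in one pass, one line per instance and flag:\n```\n# Report every section-6.3 flag that is set, as \"<instance>: <flag>=<value>\" (requires jq 1.5+)\ngcloud sql instances list --format=json | jq -r '.[] | .name as $i | .settings.databaseFlags[]? | select(.name|IN(\"3625\",\"external scripts enabled\",\"remote access\",\"user connections\",\"user options\",\"contained database authentication\")) | \"\\($i): \\(.name)=\\(.value)\"'\n```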
", + "AdditionalInformation": "WARNING: \n\nThis patch modifies database flag values, which may require \nyour instance to be restarted. Check the list of supported flags - \nhttps://cloud.google.com/sql/docs/sqlserver/flags - to see if your \ninstance will be restarted when this patch is submitted.\n\nNote: \n\nsome database flag settings can affect instance availability or stability, and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n\nNote: \n\nConfiguring the above flag restarts the Cloud SQL instance.", + "References": "https://cloud.google.com/sql/docs/sqlserver/flags:https://docs.microsoft.com/en-us/sql/t-sql/database-console-commands/dbcc-traceon-trace-flags-transact-sql?view=sql-server-ver15#trace-flags:https://github.com/ktaranov/sqlserver-kit/blob/master/SQL%20Server%20Trace%20Flag.md" + } + ] + }, + { + "Id": "6.3.1", + "Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`", + "Checks": [ + "cloudsql_instance_sqlserver_external_scripts_enabled_flag" + ], + "Attributes": [ + { + "Section": "6.3. SQL Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to set `external scripts enabled` database flag for Cloud SQL SQL Server instance to `off`", + "RationaleStatement": "`external scripts enabled` enables the execution of scripts with certain remote language extensions. This property is OFF by default. When Advanced Analytics Services is installed, setup can optionally set this property to true. As the External Scripts Enabled feature allows scripts external to SQL, such as files located in an R library, to be executed, which could adversely affect the security of the system, it should be disabled. This recommendation is applicable to SQL Server database instances.", + "ImpactStatement": "Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts. Because of this, it is recommended you apply these flag changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the SQL Server instance for which you want to enable the database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `external scripts enabled` from the drop-down menu, and set its value to `off`.\n6. Click `Save` to save your changes.\n7. Confirm your changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. Configure the `external scripts enabled` database flag for every Cloud SQL SQL Server database instance using the below command.\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags \"external scripts enabled=off\"\n```\n\n```\nNote : \n\nThis command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags you want set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page\n3. Ensure the database flag `external scripts enabled` that has been set is listed under the `Database flags` section.\n\n**From Google Cloud CLI**\n\n1. Ensure the below command returns `off` for every Cloud SQL SQL Server database instance\n```\ngcloud sql instances list --format=json | jq '.[].settings.databaseFlags[] | select(.name==\"external scripts enabled\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require \nyour instance to be restarted. Check the list of supported flags - \nhttps://cloud.google.com/sql/docs/sqlserver/flags - to see if your \ninstance will be restarted when this patch is submitted.\n```\n\n```\nNote: some database flag settings can affect instance availability or stability, and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n\n```\n\n```\nNote: Configuring the above flag restarts the Cloud SQL instance.\n```", + "References": "https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/external-scripts-enabled-server-configuration-option?view=sql-server-ver15:https://cloud.google.com/sql/docs/sqlserver/flags:https://docs.microsoft.com/en-us/sql/advanced-analytics/concepts/security?view=sql-server-ver15:https://www.stigviewer.com/stig/ms_sql_server_2016_instance/2018-03-09/finding/V-79347" + } + ] + }, + { + "Id": "6.3.5", + "Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.", + "Checks": [ + "cloudsql_instance_sqlserver_remote_access_flag" + ], + "Attributes": [ + { + "Section": "6.3. SQL Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to set `remote access` database flag for Cloud SQL SQL Server instance to `off`.", + "RationaleStatement": "The `remote access` option controls the execution of stored procedures from local or remote servers on which instances of SQL Server are running. The default value for this option is 1.
This grants permission to run local stored procedures from remote servers or remote stored procedures from the local server. To prevent local stored procedures from being run from a remote server or remote stored procedures from being run on the local server, this must be disabled. 'Remote access' functionality can be abused to launch a Denial-of-Service (DoS) attack on remote servers by off-loading query processing to a target, hence this should be disabled. This recommendation is applicable to SQL Server database instances.", + "ImpactStatement": "Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts. Because of this, it is recommended you apply these flag changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the SQL Server instance for which you want to enable the database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `remote access` from the drop-down menu, and set its value to `off`.\n6. Click `Save` to save your changes.\n7. Confirm your changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. Configure the `remote access` database flag for every Cloud SQL SQL Server database instance using the below command\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags \"remote access=off\"\n```\n\n```\nNote : \n\nThis command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags you want set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page\n3. Ensure the database flag `remote access` that has been set is listed under the `Database flags` section.\n\n**From Google Cloud CLI**\n\n1. Ensure the below command returns `off` for every Cloud SQL SQL Server database instance\n```\ngcloud sql instances list --format=json | jq '.[].settings.databaseFlags[] | select(.name==\"remote access\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require \nyour instance to be restarted. Check the list of supported flags - \nhttps://cloud.google.com/sql/docs/sqlserver/flags - to see if your \ninstance will be restarted when this patch is submitted.\n```\n\n```\nNote: some database flag settings can affect instance availability or stability, and remove the instance from the Cloud SQL SLA.
For information about these flags, see Operational Guidelines.\n\n```\n\n```\nNote: Configuring the above flag does not restart the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/sqlserver/flags:https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-remote-access-server-configuration-option?view=sql-server-ver15:https://www.stigviewer.com/stig/ms_sql_server_2016_instance/2018-03-09/finding/V-79337" + } + ] + }, + { + "Id": "6.3.3", + "Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.", + "Checks": [ + "cloudsql_instance_sqlserver_user_connections_flag" + ], + "Attributes": [ + { + "Section": "6.3. SQL Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to check the `user connections` for a Cloud SQL SQL Server instance to ensure that it is not artificially limiting connections.", + "RationaleStatement": "The `user connections` option specifies the maximum number of simultaneous user connections that are allowed on an instance of SQL Server. The actual number of user connections allowed also depends on the version of SQL Server that you are using, and also the limits of your application or applications and hardware. SQL Server allows a maximum of 32,767 user connections. Because `user connections` is by default a self-configuring value, SQL Server adjusts the maximum number of user connections automatically as needed, up to the maximum value allowable. For example, if only 10 users are logged in, 10 user connection objects are allocated. In most cases, you do not have to change the value for this option. The default is 0, which means that the maximum (32,767) user connections are allowed. However, if a number is defined here that limits connections, SQL Server will not allow any more above this limit. If the connections are at the limit, any new requests will be dropped, potentially causing lost data or outages for those using the database.", + "ImpactStatement": "Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts. Because of this, it is recommended you apply these flag changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the SQL Server instance for which you want to enable the database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `user connections` from the drop-down menu, and set its value to your organization's recommended value.\n6. Click `Save` to save your changes.\n7. Confirm your changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. Configure the `user connections` database flag for every Cloud SQL SQL Server database instance using the below command, where VALUE is a number in the range 0-32,767.\n```\ngcloud sql instances patch INSTANCE_NAME --database-flags \"user connections=VALUE\"\n```\n\n```\nNote : \n\nThis command will overwrite all database flags previously set.
To keep those and add new ones, include the values for all flags you want set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page\n3. Ensure the database flag `user connections` listed under the `Database flags` section is 0.\n\n**From Google Cloud CLI**\n\n1. Ensure the below command returns a value of 0, for every Cloud SQL SQL Server database instance.\n```\ngcloud sql instances list --format=json | jq '.settings.databaseFlags | select(.name==\"user connections\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require \nyour instance to be restarted. Check the list of supported flags - \nhttps://cloud.google.com/sql/docs/sqlserver/flags - to see if your \ninstance will be restarted when this patch is submitted.\n```\n\n```\nNote: some database flag settings can affect instance availability or stability, and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n```\n\n```\nNote: Configuring the above flag does not restart the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/sqlserver/flags:https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-user-connections-server-configuration-option?view=sql-server-ver15:https://www.stigviewer.com/stig/ms_sql_server_2016_instance/2018-03-09/finding/V-79119" + } + ] + }, + { + "Id": "6.3.4", + "Description": "It is recommended that, `user options` database flag for Cloud SQL SQL Server instance should not be configured.", + "Checks": [ + "cloudsql_instance_sqlserver_user_options_flag" + ], + "Attributes": [ + { + "Section": "6.3. SQL Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended that, `user options` database flag for Cloud SQL SQL Server instance should not be configured.", + "RationaleStatement": "The `user options` option specifies global defaults for all users. A list of default query processing options is established for the duration of a user's work session. The user options option allows you to change the default values of the SET options (if the server's default settings are not appropriate).\n\nA user can override these defaults by using the SET statement. You can configure user options dynamically for new logins. After you change the setting of user options, new login sessions use the new setting; current login sessions are not affected. This recommendation is applicable to SQL Server database instances.", + "ImpactStatement": "Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts. Because of this, it is recommended you apply these flags changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. 
Select the SQL Server instance for which you want to enable to database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. Click the X next `user options` flag shown\n6. Click `Save` to save your changes.\n7. Confirm your changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. List all Cloud SQL database Instances\n```\ngcloud sql instances list\n```\n2. Clear the `user options` database flag for every Cloud SQL SQL Server database instance using either of the below commands.\n\n```\n1.Clearing all flags to their default value\n\ngcloud sql instances patch --clear-database-flags\n\nOR\n2. To clear only `user options` database flag, configure the database flag by overriding the `user options`. Exclude `user options` flag and its value, and keep all other flags you want to configure.\n\ngcloud sql instances patch --database-flags FLAG1=VALUE1,FLAG2=VALUE2\n```\n\n```\nNote : \n\nThis command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags you want set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page\n3. Ensure the database flag `user options` that has been set is not listed under the `Database flags` section.\n\n**From Google Cloud CLI**\n\n1. Ensure the below command returns empty result for every Cloud SQL SQL Server database instance\n```\ngcloud sql instances list --format=json | jq '.settings.databaseFlags | select(.name==\"user options\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require \nyour instance to be restarted. Check the list of supported flags - \nhttps://cloud.google.com/sql/docs/sqlserver/flags - to see if your \ninstance will be restarted when this patch is submitted.\n```\n\n```\nNote: some database flag settings can affect instance availability or stability, and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n\n```\n\n```\nNote: Configuring the above flag does not restart the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/sqlserver/flags:https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-user-options-server-configuration-option?view=sql-server-ver15:https://www.stigviewer.com/stig/ms_sql_server_2016_instance/2018-03-09/finding/V-79335" + } + ] + }, + { + "Id": "6.3.7", + "Description": "It is recommended to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `off`.", + "Checks": [ + "cloudsql_instance_sqlserver_contained_database_authentication_flag" + ], + "Attributes": [ + { + "Section": "6.3. 
SQL Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to set `contained database authentication` database flag for Cloud SQL on the SQL Server instance to `off`.", + "RationaleStatement": "A contained database includes all database settings and metadata required to define the database and has no configuration dependencies on the instance of the Database Engine where the database is installed. Users can connect to the database without authenticating a login at the Database Engine level. Isolating the database from the Database Engine makes it possible to easily move the database to another instance of SQL Server. Contained databases have some unique threats that should be understood and mitigated by SQL Server Database Engine administrators. Most of the threats are related to the USER WITH PASSWORD authentication process, which moves the authentication boundary from the Database Engine level to the database level, hence this is recommended to disable this flag. This recommendation is applicable to SQL Server database instances.", + "ImpactStatement": "When `contained database authentication` is off (0) for the instance, contained databases cannot be created, or attached to the Database Engine. Turning on logging will increase the required storage over time. Mismanaged logs may cause your storage costs to increase.Setting custom flags via command line on certain instances will cause all omitted flags to be reset to defaults. This may cause you to lose custom flags and could result in unforeseen complications or instance restarts. Because of this, it is recommended you apply these flags changes during a period of low usage.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the SQL Server instance for which you want to enable to database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `contained database authentication` from the drop-down menu, and set its value to `off`.\n6. Click `Save`.\n7. Confirm the changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. Configure the `contained database authentication` database flag for every Cloud SQL SQL Server database instance using the below command:\n```\ngcloud sql instances patch --database-flags \"contained database authentication=off\"\n```\n\n```\nNote: \n\nThis command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the instance to open its `Instance Overview` page\n3. Ensure the database flag `contained database authentication` that has been set is listed under the `Database flags` section.\n\n**From Google Cloud CLI**\n\n1. 
Ensure the below command returns `off` for every Cloud SQL SQL Server database instance.\n```\ngcloud sql instances list --format=json | jq '.settings.databaseFlags | select(.name==\"contained database authentication\")|.value'\n```", + "AdditionalInformation": "```\nWARNING: This patch modifies database flag values, which may require \nyour instance to be restarted. Check the list of supported flags - \nhttps://cloud.google.com/sql/docs/sqlserver/flags - to see if your \ninstance will be restarted when this patch is submitted.\n```\n```\nNote: Some database flag settings can affect instance availability or stability, and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n\n```\n```\nNote: Configuring the above flag does not restart the Cloud SQL instance.\n```", + "References": "https://cloud.google.com/sql/docs/sqlserver/flags:https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/contained-database-authentication-server-configuration-option?view=sql-server-ver15:https://docs.microsoft.com/en-us/sql/relational-databases/databases/security-best-practices-with-contained-databases?view=sql-server-ver15" + } + ] + }, + { + "Id": "6.3.2", + "Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`.", + "Checks": [ + "cloudsql_instance_sqlserver_cross_db_ownership_chaining_flag" + ], + "Attributes": [ + { + "Section": "6.3. SQL Server", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended to set `cross db ownership chaining` database flag for Cloud SQL SQL Server instance to `off`.", + "RationaleStatement": "Use the `cross db ownership` for chaining option to configure cross-database ownership chaining for an instance of Microsoft SQL Server. This server option allows you to control cross-database ownership chaining at the database level or to allow cross-database ownership chaining for all databases. Enabling `cross db ownership` is not recommended unless all of the databases hosted by the instance of SQL Server must participate in cross-database ownership chaining and you are aware of the security implications of this setting. This recommendation is applicable to SQL Server database instances.", + "ImpactStatement": "Updating flags may cause the database to restart. This may cause it to unavailable for a short amount of time, so this is best done at a time of low usage. You should also determine if the tables in your databases reference another table without using credentials for that database, as turning off cross database ownership will break this relationship.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances(https://console.cloud.google.com/sql/instances).\n2. Select the SQL Server instance for which you want to enable to database flag.\n3. Click `Edit`.\n4. Scroll down to the `Flags` section.\n5. To set a flag that has not been set on the instance before, click `Add item`, choose the flag `cross db ownership chaining` from the drop-down menu, and set its value to `off`.\n6. Click `Save`.\n7. Confirm the changes under `Flags` on the Overview page.\n\n**From Google Cloud CLI**\n\n1. 
Configure the `cross db ownership chaining` database flag for every Cloud SQL SQL Server database instance using the below command:\n```\ngcloud sql instances patch --database-flags \"cross db ownership chaining=off\"\n```\n\nNote: \n\nThis command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign (\"=\").", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to the Cloud SQL Instances page in the Google Cloud Console.\n2. Select the instance to open its `Instance Overview` page\n3. Ensure the database flag `cross db ownership chaining` that has been set is listed under the `Database flags` section.\n\n**From Google Cloud CLI**\n\n1. Ensure the below command returns `off` for every Cloud SQL SQL Server database instance:\n```\ngcloud sql instances list --format=json | jq '.settings.databaseFlags | select(.name==\"cross db ownership chaining\")|.value'\n```", + "AdditionalInformation": "WARNING: This patch modifies database flag values, which may require \nyour instance to be restarted. Check the list of supported flags - \nhttps://cloud.google.com/sql/docs/sqlserver/flags - to see if your \ninstance will be restarted when this patch is submitted.\n\nNote: Some database flag settings can affect instance availability or stability, and remove the instance from the Cloud SQL SLA. For information about these flags, see Operational Guidelines.\n\nNote: Configuring the above flag does not restart the Cloud SQL instance.", + "References": "https://cloud.google.com/sql/docs/sqlserver/flags:https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/cross-db-ownership-chaining-server-configuration-option?view=sql-server-ver15" + } + ] + }, + { + "Id": "7.3", + "Description": "BigQuery by default encrypts the data as rest by employing `Envelope Encryption` using Google managed cryptographic keys. The data is encrypted using the `data encryption keys` and data encryption keys themselves are further encrypted using `key encryption keys`. This is seamless and do not require any additional input from the user. However, if you want to have greater control, Customer-managed encryption keys (CMEK) can be used as encryption key management solution for BigQuery Data Sets.", + "Checks": [ + "bigquery_dataset_cmk_encryption" + ], + "Attributes": [ + { + "Section": "7. BigQuery", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "BigQuery by default encrypts the data as rest by employing `Envelope Encryption` using Google managed cryptographic keys. The data is encrypted using the `data encryption keys` and data encryption keys themselves are further encrypted using `key encryption keys`. This is seamless and do not require any additional input from the user. However, if you want to have greater control, Customer-managed encryption keys (CMEK) can be used as encryption key management solution for BigQuery Data Sets.", + "RationaleStatement": "BigQuery by default encrypts the data as rest by employing `Envelope Encryption` using Google managed cryptographic keys. This is seamless and does not require any additional input from the user.\n\nFor greater control over the encryption, customer-managed encryption keys (CMEK) can be used as encryption key management solution for BigQuery Data Sets. 
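As an illustration only, setting a dataset's default CMEK with the `google-cloud-bigquery` Python client looks roughly like this (dataset and key names are placeholders):

```
from google.cloud import bigquery

client = bigquery.Client()
dataset = client.get_dataset("my_project.my_dataset")  # placeholder

# Tables created in this dataset from now on default to this CMEK.
dataset.default_encryption_configuration = bigquery.EncryptionConfiguration(
    kms_key_name="projects/my_project/locations/us/keyRings/my_kr/cryptoKeys/my_key"
)
dataset = client.update_dataset(dataset, ["default_encryption_configuration"])
```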
Setting a Default Customer-managed encryption key (CMEK) for a data set ensure any tables created in future will use the specified CMEK if none other is provided.\n\n```\nNote: Google does not store your keys on its servers and cannot access your protected data unless you provide the key. This also means that if you forget or lose your key, there is no way for Google to recover the key or to recover any data encrypted with the lost key.\n```", + "ImpactStatement": "Using Customer-managed encryption keys (CMEK) will incur additional labor-hour investment to create, protect, and manage the keys.", + "RemediationProcedure": "**From Google Cloud CLI**\n\nThe default CMEK for existing data sets can be updated by specifying the default key in the `EncryptionConfiguration.kmsKeyName` field when calling the `datasets.insert` or `datasets.patch` methods", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `Analytics`\n2. Go to `BigQuery`\n3. Under `Analysis` click on `SQL Workspaces`, select the project\n4. Select Data Set\n5. Ensure `Customer-managed key` is present under `Dataset info` section.\n6. Repeat for each data set in all projects.\n\n**From Google Cloud CLI**\n\nList all dataset names\n```\nbq ls\n```\nUse the following command to view each dataset details.\n```\nbq show \n```\nVerify the `kmsKeyName` is present.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/bigquery/docs/customer-managed-encryption" + } + ] + }, + { + "Id": "7.2", + "Description": "BigQuery by default encrypts the data as rest by employing `Envelope Encryption` using Google managed cryptographic keys. The data is encrypted using the `data encryption keys` and data encryption keys themselves are further encrypted using `key encryption keys`. This is seamless and do not require any additional input from the user. However, if you want to have greater control, Customer-managed encryption keys (CMEK) can be used as encryption key management solution for BigQuery Data Sets. If CMEK is used, the CMEK is used to encrypt the data encryption keys instead of using google-managed encryption keys.", + "Checks": [ + "bigquery_table_cmk_encryption" + ], + "Attributes": [ + { + "Section": "7. BigQuery", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "BigQuery by default encrypts the data as rest by employing `Envelope Encryption` using Google managed cryptographic keys. The data is encrypted using the `data encryption keys` and data encryption keys themselves are further encrypted using `key encryption keys`. This is seamless and do not require any additional input from the user. However, if you want to have greater control, Customer-managed encryption keys (CMEK) can be used as encryption key management solution for BigQuery Data Sets. If CMEK is used, the CMEK is used to encrypt the data encryption keys instead of using google-managed encryption keys.", + "RationaleStatement": "BigQuery by default encrypts the data as rest by employing `Envelope Encryption` using Google managed cryptographic keys. This is seamless and does not require any additional input from the user.\n\nFor greater control over the encryption, customer-managed encryption keys (CMEK) can be used as encryption key management solution for BigQuery tables. The CMEK is used to encrypt the data encryption keys instead of using google-managed encryption keys. 
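A hedged audit sketch with the same client, walking every table and reporting those without a CMEK (it assumes application-default credentials with read access to all datasets in the client's project):

```
from google.cloud import bigquery

client = bigquery.Client()
for dataset_item in client.list_datasets():
    for table_item in client.list_tables(dataset_item.dataset_id):
        table = client.get_table(table_item.reference)
        encryption = table.encryption_configuration
        if encryption is None or not encryption.kms_key_name:
            # No kmsKeyName: the table relies on Google-managed encryption.
            print(f"{table.full_table_id}: no CMEK configured")
```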
BigQuery stores the table and CMEK association and the encryption/decryption is done automatically.\n\nApplying the Default Customer-managed keys on BigQuery data sets ensures that all the new tables created in the future will be encrypted using CMEK but existing tables need to be updated to use CMEK individually.\n\n```\nNote: Google does not store your keys on its servers and cannot access your protected data unless you provide the key. This also means that if you forget or lose your key, there is no way for Google to recover the key or to recover any data encrypted with the lost key.\n```", + "ImpactStatement": "Using Customer-managed encryption keys (CMEK) will incur additional labor-hour investment to create, protect, and manage the keys.", + "RemediationProcedure": "**From Google Cloud CLI**\nUse the following command to copy the data. The source and the destination needs to be same in case copying to the original table.\n```\nbq cp --destination_kms_key source_dataset.source_table destination_dataset.destination_table\n```", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `Analytics`\n2. Go to `BigQuery`\n3. Under `SQL Workspace`, select the project\n4. Select Data Set, select the table\n5. Go to `Details` tab\n6. Under `Table info`, verify `Customer-managed key` is present.\n7. Repeat for each table in all data sets for all projects.\n\n**From Google Cloud CLI**\n\nList all dataset names\n```\nbq ls\n```\nUse the following command to view the table details. Verify the `kmsKeyName` is present.\n```\nbq show \n```", + "AdditionalInformation": "", + "References": "https://cloud.google.com/bigquery/docs/customer-managed-encryption" + } + ] + }, + { + "Id": "7.1", + "Description": "It is recommended that the IAM policy on BigQuery datasets does not allow anonymous and/or public access.", + "Checks": [ + "bigquery_dataset_public_access" + ], + "Attributes": [ + { + "Section": "7. BigQuery", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "It is recommended that the IAM policy on BigQuery datasets does not allow anonymous and/or public access.", + "RationaleStatement": "Granting permissions to `allUsers` or `allAuthenticatedUsers` allows anyone to access the dataset. Such access might not be desirable if sensitive data is being stored in the dataset. Therefore, ensure that anonymous and/or public access to a dataset is not allowed.", + "ImpactStatement": "The dataset is not publicly accessible. Explicit modification of IAM privileges would be necessary to make them publicly accessible.", + "RemediationProcedure": "**From Google Cloud Console**\n\n1. Go to `BigQuery` by visiting: https://console.cloud.google.com/bigquery(https://console.cloud.google.com/bigquery).\n2. Select the dataset from 'Resources'.\n3. Click `SHARING` near the right side of the window and select `Permissions`.\n4. Review each attached role.\n5. Click the delete icon for each member `allUsers` or `allAuthenticatedUsers`. 
On the popup click `Remove`.\n\n**From Google Cloud CLI**\n\nList the name of all datasets.\n```\nbq ls\n```\nRetrieve the data set details: \n```\nbq show --format=prettyjson PROJECT_ID:DATASET_NAME > PATH_TO_FILE\n```\nIn the access section of the JSON file, update the dataset information to remove all roles containing `allUsers` or `allAuthenticatedUsers`.\n\nUpdate the dataset:\n```\nbq update --source PATH_TO_FILE PROJECT_ID:DATASET_NAME\n```\n\n**Prevention:**\n\nYou can prevent Bigquery dataset from becoming publicly accessible by setting up the `Domain restricted sharing` organization policy at: https://console.cloud.google.com/iam-admin/orgpolicies/iam-allowedPolicyMemberDomains .", + "AuditProcedure": "**From Google Cloud Console**\n\n1. Go to `BigQuery` by visiting: https://console.cloud.google.com/bigquery(https://console.cloud.google.com/bigquery).\n2. Select a dataset from `Resources`.\n3. Click `SHARING` near the right side of the window and select `Permissions`.\n4. Validate that none of the attached roles contain `allUsers` or `allAuthenticatedUsers`.\n\n**From Google Cloud CLI**\n\nList the name of all datasets.\n```\nbq ls\n```\nRetrieve each dataset details using the following command:\n```\nbq show PROJECT_ID:DATASET_NAME\n```\nEnsure that `allUsers` and `allAuthenticatedUsers` have not been granted access to the dataset.", + "AdditionalInformation": "", + "References": "https://cloud.google.com/bigquery/docs/dataset-access-controls" + } + ] + } + ] +} diff --git a/prowler/config/config.py b/prowler/config/config.py index bd56f8e6..5362283b 100644 --- a/prowler/config/config.py +++ b/prowler/config/config.py @@ -24,17 +24,16 @@ banner_color = "\033[1;92m" # Compliance actual_directory = pathlib.Path(os.path.dirname(os.path.realpath(__file__))) -compliance_aws_dir = f"{actual_directory}/../compliance/aws" available_compliance_frameworks = [] -with os.scandir(compliance_aws_dir) as files: - files = [ - file.name - for file in files - if file.is_file() - and file.name.endswith(".json") - and available_compliance_frameworks.append(file.name.removesuffix(".json")) - ] - +for provider in ["aws", "gcp"]: + with os.scandir(f"{actual_directory}/../compliance/{provider}") as files: + files = [ + file.name + for file in files + if file.is_file() + and file.name.endswith(".json") + and available_compliance_frameworks.append(file.name.removesuffix(".json")) + ] # AWS services-regions matrix json aws_services_json_file = "aws_regions_by_service.json" diff --git a/prowler/lib/outputs/compliance.py b/prowler/lib/outputs/compliance.py index ee1b5922..1a8706b0 100644 --- a/prowler/lib/outputs/compliance.py +++ b/prowler/lib/outputs/compliance.py @@ -8,10 +8,11 @@ from prowler.config.config import orange_color, timestamp from prowler.lib.check.models import Check_Report from prowler.lib.logger import logger from prowler.lib.outputs.models import ( + Check_Output_CSV_AWS_CIS, Check_Output_CSV_AWS_ISO27001_2013, Check_Output_CSV_AWS_Well_Architected, - Check_Output_CSV_CIS, Check_Output_CSV_ENS_RD2022, + Check_Output_CSV_GCP_CIS, Check_Output_CSV_Generic_Compliance, Check_Output_MITRE_ATTACK, generate_csv_fields, @@ -29,7 +30,10 @@ def add_manual_controls(output_options, audit_info, file_descriptors): manual_finding.status = "INFO" manual_finding.status_extended = "Manual check" manual_finding.resource_id = "manual_check" + manual_finding.resource_name = "Manual check" manual_finding.region = "" + manual_finding.location = "" + manual_finding.project_id = "" fill_compliance( 
output_options, manual_finding, audit_info, file_descriptors ) @@ -86,38 +90,70 @@ def fill_compliance(output_options, finding, audit_info, file_descriptors): elif compliance.Framework == "CIS" and "cis_" in str( output_options.output_modes ): - compliance_output = "cis_" + compliance.Version + "_aws" + compliance_output = ( + "cis_" + compliance.Version + "_" + compliance.Provider.lower() + ) # Only with the version of CIS that was selected if compliance_output in str(output_options.output_modes): for requirement in compliance.Requirements: requirement_description = requirement.Description requirement_id = requirement.Id for attribute in requirement.Attributes: - compliance_row = Check_Output_CSV_CIS( - Provider=finding.check_metadata.Provider, - Description=compliance.Description, - AccountId=audit_info.audited_account, - Region=finding.region, - AssessmentDate=timestamp.isoformat(), - Requirements_Id=requirement_id, - Requirements_Description=requirement_description, - Requirements_Attributes_Section=attribute.Section, - Requirements_Attributes_Profile=attribute.Profile, - Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus, - Requirements_Attributes_Description=attribute.Description, - Requirements_Attributes_RationaleStatement=attribute.RationaleStatement, - Requirements_Attributes_ImpactStatement=attribute.ImpactStatement, - Requirements_Attributes_RemediationProcedure=attribute.RemediationProcedure, - Requirements_Attributes_AuditProcedure=attribute.AuditProcedure, - Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation, - Requirements_Attributes_References=attribute.References, - Status=finding.status, - StatusExtended=finding.status_extended, - ResourceId=finding.resource_id, - CheckId=finding.check_metadata.CheckID, - ) - - csv_header = generate_csv_fields(Check_Output_CSV_CIS) + if compliance.Provider == "AWS": + compliance_row = Check_Output_CSV_AWS_CIS( + Provider=finding.check_metadata.Provider, + Description=compliance.Description, + AccountId=audit_info.audited_account, + Region=finding.region, + AssessmentDate=timestamp.isoformat(), + Requirements_Id=requirement_id, + Requirements_Description=requirement_description, + Requirements_Attributes_Section=attribute.Section, + Requirements_Attributes_Profile=attribute.Profile, + Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus, + Requirements_Attributes_Description=attribute.Description, + Requirements_Attributes_RationaleStatement=attribute.RationaleStatement, + Requirements_Attributes_ImpactStatement=attribute.ImpactStatement, + Requirements_Attributes_RemediationProcedure=attribute.RemediationProcedure, + Requirements_Attributes_AuditProcedure=attribute.AuditProcedure, + Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation, + Requirements_Attributes_References=attribute.References, + Status=finding.status, + StatusExtended=finding.status_extended, + ResourceId=finding.resource_id, + CheckId=finding.check_metadata.CheckID, + ) + csv_header = generate_csv_fields( + Check_Output_CSV_AWS_CIS + ) + elif compliance.Provider == "GCP": + compliance_row = Check_Output_CSV_GCP_CIS( + Provider=finding.check_metadata.Provider, + Description=compliance.Description, + ProjectId=finding.project_id, + Location=finding.location, + AssessmentDate=timestamp.isoformat(), + Requirements_Id=requirement_id, + Requirements_Description=requirement_description, + Requirements_Attributes_Section=attribute.Section, + Requirements_Attributes_Profile=attribute.Profile, + 
Requirements_Attributes_AssessmentStatus=attribute.AssessmentStatus, + Requirements_Attributes_Description=attribute.Description, + Requirements_Attributes_RationaleStatement=attribute.RationaleStatement, + Requirements_Attributes_ImpactStatement=attribute.ImpactStatement, + Requirements_Attributes_RemediationProcedure=attribute.RemediationProcedure, + Requirements_Attributes_AuditProcedure=attribute.AuditProcedure, + Requirements_Attributes_AdditionalInformation=attribute.AdditionalInformation, + Requirements_Attributes_References=attribute.References, + Status=finding.status, + StatusExtended=finding.status_extended, + ResourceId=finding.resource_id, + ResourceName=finding.resource_name, + CheckId=finding.check_metadata.CheckID, + ) + csv_header = generate_csv_fields( + Check_Output_CSV_GCP_CIS + ) elif ( "AWS-Well-Architected-Framework" in compliance.Framework @@ -412,7 +448,7 @@ def display_compliance_table( print( f" - CSV: {output_directory}/{output_filename}_{compliance_framework}.csv\n" ) - elif "cis_1." in compliance_framework: + elif "cis_" in compliance_framework: sections = {} cis_compliance_table = { "Provider": [], diff --git a/prowler/lib/outputs/file_descriptors.py b/prowler/lib/outputs/file_descriptors.py index bd5ab690..a2339e12 100644 --- a/prowler/lib/outputs/file_descriptors.py +++ b/prowler/lib/outputs/file_descriptors.py @@ -14,10 +14,11 @@ from prowler.lib.outputs.html import add_html_header from prowler.lib.outputs.models import ( Aws_Check_Output_CSV, Azure_Check_Output_CSV, + Check_Output_CSV_AWS_CIS, Check_Output_CSV_AWS_ISO27001_2013, Check_Output_CSV_AWS_Well_Architected, - Check_Output_CSV_CIS, Check_Output_CSV_ENS_RD2022, + Check_Output_CSV_GCP_CIS, Check_Output_CSV_Generic_Compliance, Check_Output_MITRE_ATTACK, Gcp_Check_Output_CSV, @@ -120,6 +121,14 @@ def fill_file_descriptors(output_modes, output_directory, output_filename, audit ) file_descriptors.update({output_mode: file_descriptor}) + elif isinstance(audit_info, GCP_Audit_Info): + if output_mode == "cis_2.0_gcp": + filename = f"{output_directory}/{output_filename}_cis_2.0_gcp{csv_file_suffix}" + file_descriptor = initialize_file_descriptor( + filename, output_mode, audit_info, Check_Output_CSV_GCP_CIS + ) + file_descriptors.update({output_mode: file_descriptor}) + elif isinstance(audit_info, AWS_Audit_Info): if output_mode == "json-asff": filename = f"{output_directory}/{output_filename}{json_asff_file_suffix}" @@ -141,14 +150,14 @@ def fill_file_descriptors(output_modes, output_directory, output_filename, audit elif output_mode == "cis_1.5_aws": filename = f"{output_directory}/{output_filename}_cis_1.5_aws{csv_file_suffix}" file_descriptor = initialize_file_descriptor( - filename, output_mode, audit_info, Check_Output_CSV_CIS + filename, output_mode, audit_info, Check_Output_CSV_AWS_CIS ) file_descriptors.update({output_mode: file_descriptor}) elif output_mode == "cis_1.4_aws": filename = f"{output_directory}/{output_filename}_cis_1.4_aws{csv_file_suffix}" file_descriptor = initialize_file_descriptor( - filename, output_mode, audit_info, Check_Output_CSV_CIS + filename, output_mode, audit_info, Check_Output_CSV_AWS_CIS ) file_descriptors.update({output_mode: file_descriptor}) diff --git a/prowler/lib/outputs/models.py b/prowler/lib/outputs/models.py index 9e199a88..f1ca76aa 100644 --- a/prowler/lib/outputs/models.py +++ b/prowler/lib/outputs/models.py @@ -539,7 +539,7 @@ class Check_Output_CSV_ENS_RD2022(BaseModel): CheckId: str -class Check_Output_CSV_CIS(BaseModel): +class 
Check_Output_CSV_AWS_CIS(BaseModel): """ Check_Output_CSV_CIS generates a finding's output in CSV CIS format. """ @@ -567,6 +567,35 @@ class Check_Output_CSV_CIS(BaseModel): CheckId: str +class Check_Output_CSV_GCP_CIS(BaseModel): + """ + Check_Output_CSV_GCP_CIS generates a GCP finding's output in CSV CIS format. + """ + + Provider: str + Description: str + ProjectId: str + Location: str + AssessmentDate: str + Requirements_Id: str + Requirements_Description: str + Requirements_Attributes_Section: str + Requirements_Attributes_Profile: str + Requirements_Attributes_AssessmentStatus: str + Requirements_Attributes_Description: str + Requirements_Attributes_RationaleStatement: str + Requirements_Attributes_ImpactStatement: str + Requirements_Attributes_RemediationProcedure: str + Requirements_Attributes_AuditProcedure: str + Requirements_Attributes_AdditionalInformation: str + Requirements_Attributes_References: str + Status: str + StatusExtended: str + ResourceId: str + ResourceName: str + CheckId: str + + class Check_Output_CSV_Generic_Compliance(BaseModel): """ Check_Output_CSV_Generic_Compliance generates a finding's output in CSV Generic Compliance format. diff --git a/prowler/lib/outputs/outputs.py b/prowler/lib/outputs/outputs.py index f4283ab3..13e776fd 100644 --- a/prowler/lib/outputs/outputs.py +++ b/prowler/lib/outputs/outputs.py @@ -86,25 +86,24 @@ def report(check_findings, output_options, audit_info): if file_descriptors: # Check if --quiet to only add fails to outputs if not (finding.status != "FAIL" and output_options.is_quiet): + if any( + compliance in output_options.output_modes + for compliance in available_compliance_frameworks + ): + fill_compliance( + output_options, + finding, + audit_info, + file_descriptors, + ) + + add_manual_controls( + output_options, + audit_info, + file_descriptors, + ) # AWS specific outputs if finding.check_metadata.Provider == "aws": - if any( - compliance in output_options.output_modes - for compliance in available_compliance_frameworks - ): - fill_compliance( - output_options, - finding, - audit_info, - file_descriptors, - ) - - add_manual_controls( - output_options, - audit_info, - file_descriptors, - ) - if "json-asff" in file_descriptors: finding_output = Check_Output_JSON_ASFF() fill_json_asff( diff --git a/prowler/providers/gcp/services/apikeys/apikeys_api_restrictions_configured/apikeys_api_restrictions_configured.py b/prowler/providers/gcp/services/apikeys/apikeys_api_restrictions_configured/apikeys_api_restrictions_configured.py index 156b952f..b354996c 100644 --- a/prowler/providers/gcp/services/apikeys/apikeys_api_restrictions_configured/apikeys_api_restrictions_configured.py +++ b/prowler/providers/gcp/services/apikeys/apikeys_api_restrictions_configured/apikeys_api_restrictions_configured.py @@ -11,7 +11,8 @@ class apikeys_api_restrictions_configured(Check): report.resource_id = key.id report.resource_name = key.name report.status = "PASS" - report.status_extended = f"API key {key.name} have restrictions configured." + report.location = apikeys_client.region + report.status_extended = f"API key {key.name} has restrictions configured."
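# key.restrictions mirrors the API Keys v2 "restrictions" object; its
# apiTargets[].service entries name the services the key may call. An empty
# object, or a target of cloudapis.googleapis.com (which spans the Google
# Cloud APIs as a whole), leaves the key effectively unrestricted.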
if key.restrictions == {} or any( [ target.get("service") == "cloudapis.googleapis.com" diff --git a/prowler/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/__init__.py b/prowler/providers/gcp/services/apikeys/apikeys_key_exists/__init__.py similarity index 100% rename from prowler/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/__init__.py rename to prowler/providers/gcp/services/apikeys/apikeys_key_exists/__init__.py diff --git a/prowler/providers/gcp/services/apikeys/apikeys_key_exists/apikeys_key_exists.metadata.json b/prowler/providers/gcp/services/apikeys/apikeys_key_exists/apikeys_key_exists.metadata.json new file mode 100644 index 00000000..7520e23e --- /dev/null +++ b/prowler/providers/gcp/services/apikeys/apikeys_key_exists/apikeys_key_exists.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "apikeys_key_exists", + "CheckTitle": "Ensure API Keys Only Exist for Active Services", + "CheckType": [], + "ServiceName": "apikeys", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "API Key", + "Description": "API Keys should only be used for services in cases where other authentication methods are unavailable. Unused keys with their permissions intact may still exist within a project. Keys are insecure because they can be viewed publicly, such as from within a browser, or they can be accessed on a device where the key resides. It is recommended to use the standard authentication flow instead.", + "Risk": "Using API keys carries several security risks: API keys are simple encrypted strings; they do not identify the user or the application making the API request; and they are typically accessible to clients, which makes it easy to discover and steal an API key.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "gcloud alpha services api-keys delete", + "NativeIaC": "", + "Other": "", + "Terraform": "" + }, + "Recommendation": { + "Text": "To avoid the security risks of using API keys, it is recommended to use the standard authentication flow instead.", + "Url": "https://cloud.google.com/docs/authentication/api-keys" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/apikeys/apikeys_key_exists/apikeys_key_exists.py b/prowler/providers/gcp/services/apikeys/apikeys_key_exists/apikeys_key_exists.py new file mode 100644 index 00000000..2e9069ca --- /dev/null +++ b/prowler/providers/gcp/services/apikeys/apikeys_key_exists/apikeys_key_exists.py @@ -0,0 +1,22 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.apikeys.apikeys_client import apikeys_client + + +class apikeys_key_exists(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for project in apikeys_client.project_ids: + report = Check_Report_GCP(self.metadata()) + report.project_id = project + report.resource_id = project + report.location = apikeys_client.region + report.status = "PASS" + report.status_extended = f"Project {project} does not have active API Keys." + for key in apikeys_client.keys: + if key.project_id == project: + report.status = "FAIL" + report.status_extended = f"Project {project} has active API Keys."
+ break + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/apikeys/apikeys_key_rotated_in_90_days/apikeys_key_rotated_in_90_days.py b/prowler/providers/gcp/services/apikeys/apikeys_key_rotated_in_90_days/apikeys_key_rotated_in_90_days.py index bdab628f..77a8d870 100644 --- a/prowler/providers/gcp/services/apikeys/apikeys_key_rotated_in_90_days/apikeys_key_rotated_in_90_days.py +++ b/prowler/providers/gcp/services/apikeys/apikeys_key_rotated_in_90_days/apikeys_key_rotated_in_90_days.py @@ -12,6 +12,7 @@ class apikeys_key_rotated_in_90_days(Check): report.project_id = key.project_id report.resource_id = key.id report.resource_name = key.name + report.location = apikeys_client.region report.status = "PASS" report.status_extended = f"API key {key.name} created in less than 90 days." if ( @@ -20,7 +21,7 @@ ).days > 90: report.status = "FAIL" report.status_extended = ( - f"API key {key.name} creation date have more than 90 days." + f"API key {key.name} was created more than 90 days ago." ) findings.append(report) diff --git a/prowler/providers/gcp/services/apikeys/apikeys_service.py b/prowler/providers/gcp/services/apikeys/apikeys_service.py index 4553f206..6b921911 100644 --- a/prowler/providers/gcp/services/apikeys/apikeys_service.py +++ b/prowler/providers/gcp/services/apikeys/apikeys_service.py @@ -11,6 +11,7 @@ class APIKeys: self.api_version = "v2" self.project_ids = audit_info.project_ids self.default_project_id = audit_info.default_project_id + self.region = "global" self.client = generate_client(self.service, self.api_version, audit_info) self.keys = [] self.__get_keys__() diff --git a/prowler/providers/gcp/services/cloudresourcemanager/cloudresourcemanager_service.py b/prowler/providers/gcp/services/cloudresourcemanager/cloudresourcemanager_service.py index c28edd94..7773fb8d 100644 --- a/prowler/providers/gcp/services/cloudresourcemanager/cloudresourcemanager_service.py +++ b/prowler/providers/gcp/services/cloudresourcemanager/cloudresourcemanager_service.py @@ -13,7 +13,10 @@ class CloudResourceManager: self.project_ids = audit_info.project_ids self.client = generate_client(self.service, self.api_version, audit_info) self.bindings = [] + self.projects = [] + self.organizations = [] self.__get_iam_policy__() + self.__get_organizations__() def __get_client__(self): return self.client @@ -24,6 +27,12 @@ policy = ( self.client.projects().getIamPolicy(resource=project_id).execute() ) + audit_logging = False + if policy.get("auditConfigs"): + audit_logging = True + self.projects.append( + Project(id=project_id, audit_logging=audit_logging) + ) for binding in policy["bindings"]: self.bindings.append( Binding( @@ -37,8 +46,30 @@ f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) + def __get_organizations__(self): + try: + response = self.client.organizations().search().execute() + for org in response.get("organizations", []): + self.organizations.append( + Organization(id=org["name"].split("/")[-1], name=org["displayName"]) + ) + except Exception as error: + logger.error( + f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + class Binding(BaseModel): role: str members: list project_id: str + + +class Project(BaseModel): + id: str + audit_logging: bool + + +class Organization(BaseModel): + id: str + name: str diff --git
a/prowler/providers/gcp/services/cloudsql/cloudsql_instance_postgres_log_min_duration_statement_flag/cloudsql_instance_postgres_log_min_duration_statement_flag.metadata.json b/prowler/providers/gcp/services/cloudsql/cloudsql_instance_postgres_log_min_duration_statement_flag/cloudsql_instance_postgres_log_min_duration_statement_flag.metadata.json index 62e2d165..4dd56d51 100644 --- a/prowler/providers/gcp/services/cloudsql/cloudsql_instance_postgres_log_min_duration_statement_flag/cloudsql_instance_postgres_log_min_duration_statement_flag.metadata.json +++ b/prowler/providers/gcp/services/cloudsql/cloudsql_instance_postgres_log_min_duration_statement_flag/cloudsql_instance_postgres_log_min_duration_statement_flag.metadata.json @@ -1,14 +1,14 @@ { "Provider": "gcp", "CheckID": "cloudsql_instance_postgres_log_min_duration_statement_flag", - "CheckTitle": "Ensure that the Log_min_error_statement Flag for a Cloud SQL PostgreSQL Instance Is Set to -1", + "CheckTitle": "Ensure that the Log_min_duration_statement Flag for a Cloud SQL PostgreSQL Instance Is Set to -1", "CheckType": [], "ServiceName": "cloudsql", "SubServiceName": "", "ResourceIdTemplate": "", "Severity": "medium", "ResourceType": "DatabaseInstance", - "Description": "Ensure that the Log_min_error_statement Flag for a Cloud SQL PostgreSQL Instance Is Set to -1", + "Description": "Ensure that the Log_min_duration_statement Flag for a Cloud SQL PostgreSQL Instance Is Set to -1", "Risk": "The log_min_duration_statement flag defines the minimum amount of execution time of a statement in milliseconds where the total duration of the statement is logged. Ensure that log_min_duration_statement is disabled, i.e., a value of -1 is set.", "RelatedUrl": "", "Remediation": { diff --git a/prowler/providers/gcp/services/compute/compute_default_service_account_in_use/__init__.py b/prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_log_retention_policy_lock/__init__.py similarity index 100% rename from prowler/providers/gcp/services/compute/compute_default_service_account_in_use/__init__.py rename to prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_log_retention_policy_lock/__init__.py diff --git a/prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_log_retention_policy_lock/cloudstorage_bucket_log_retention_policy_lock.metadata.json b/prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_log_retention_policy_lock/cloudstorage_bucket_log_retention_policy_lock.metadata.json new file mode 100644 index 00000000..6880e298 --- /dev/null +++ b/prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_log_retention_policy_lock/cloudstorage_bucket_log_retention_policy_lock.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "cloudstorage_bucket_log_retention_policy_lock", + "CheckTitle": "Ensure That Retention Policies on Cloud Storage Buckets Used for Exporting Logs Are Configured Using Bucket Lock", + "CheckType": [], + "ServiceName": "cloudstorage", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "Bucket", + "Description": "Enabling retention policies on log buckets will protect logs stored in cloud storage buckets from being overwritten or accidentally deleted.", + "Risk": "Sinks can be configured to export logs in storage buckets. It is recommended to configure a data retention policy for these cloud storage buckets and to lock the data retention policy; thus permanently preventing the policy from being reduced or removed. 
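For illustration, setting and then locking such a policy with the `google-cloud-storage` Python client might look like this (the bucket name and retention period are placeholders; locking is irreversible):

```
from google.cloud import storage

client = storage.Client()
bucket = client.get_bucket("my-log-sink-bucket")  # placeholder

bucket.retention_period = 90 * 24 * 60 * 60  # 90 days, in seconds
bucket.patch()

# PERMANENT: once locked, the policy can never be reduced or removed.
bucket.reload()  # refresh metageneration before locking
bucket.lock_retention_policy()
```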
This way, if the system is ever compromised by an attacker or a malicious insider who wants to cover their tracks, the activity logs are preserved for forensics and security investigations. ", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudStorage/retention-policies-with-bucket-lock.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-retention-policies-on-log-buckets-are-configured-using-bucket-lock#terraform" + }, + "Recommendation": { + "Text": "It is recommended to set up retention policies and configure Bucket Lock on all storage buckets that are used as log sinks.", + "Url": "https://cloud.google.com/storage/docs/using-uniform-bucket-level-access" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_log_retention_policy_lock/cloudstorage_bucket_log_retention_policy_lock.py b/prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_log_retention_policy_lock/cloudstorage_bucket_log_retention_policy_lock.py new file mode 100644 index 00000000..eed64ea1 --- /dev/null +++ b/prowler/providers/gcp/services/cloudstorage/cloudstorage_bucket_log_retention_policy_lock/cloudstorage_bucket_log_retention_policy_lock.py @@ -0,0 +1,35 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.cloudstorage.cloudstorage_client import ( + cloudstorage_client, +) +from prowler.providers.gcp.services.logging.logging_client import logging_client + + +class cloudstorage_bucket_log_retention_policy_lock(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + # Get Log Sink Buckets + log_buckets = [] + for sink in logging_client.sinks: + if "storage.googleapis.com" in sink.destination: + log_buckets.append(sink.destination.split("/")[-1]) + for bucket in cloudstorage_client.buckets: + if bucket.name in log_buckets: + report = Check_Report_GCP(self.metadata()) + report.project_id = bucket.project_id + report.resource_id = bucket.id + report.resource_name = bucket.name + report.location = bucket.region + report.status = "FAIL" + report.status_extended = ( + f"Log Sink Bucket {bucket.name} has no Retention Policy." + ) + if bucket.retention_policy: + report.status = "FAIL" + report.status_extended = f"Log Sink Bucket {bucket.name} has a Retention Policy but without Bucket Lock." + if bucket.retention_policy.get("isLocked"): + report.status = "PASS" + report.status_extended = f"Log Sink Bucket {bucket.name} has a Retention Policy with Bucket Lock." + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/cloudstorage/cloudstorage_service.py b/prowler/providers/gcp/services/cloudstorage/cloudstorage_service.py index 8e07739a..ed68db9f 100644 --- a/prowler/providers/gcp/services/cloudstorage/cloudstorage_service.py +++ b/prowler/providers/gcp/services/cloudstorage/cloudstorage_service.py @@ -1,3 +1,5 @@ +from typing import Optional + from pydantic import BaseModel from prowler.lib.logger import logger @@ -40,6 +42,7 @@ class CloudStorage: "uniformBucketLevelAccess" ]["enabled"], public=public, + retention_policy=bucket.get("retentionPolicy"), project_id=project_id, ) ) @@ -60,3 +63,4 @@ class Bucket(BaseModel): uniform_bucket_level_access: bool public: bool project_id: str + retention_policy: Optional[dict] diff --git
a/prowler/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/__init__.py b/prowler/providers/gcp/services/compute/compute_firewall_rdp_access_from_the_internet_allowed/__init__.py similarity index 100% rename from prowler/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/__init__.py rename to prowler/providers/gcp/services/compute/compute_firewall_rdp_access_from_the_internet_allowed/__init__.py diff --git a/prowler/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/compute_rdp_access_from_the_internet_allowed.metadata.json b/prowler/providers/gcp/services/compute/compute_firewall_rdp_access_from_the_internet_allowed/compute_firewall_rdp_access_from_the_internet_allowed.metadata.json similarity index 96% rename from prowler/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/compute_rdp_access_from_the_internet_allowed.metadata.json rename to prowler/providers/gcp/services/compute/compute_firewall_rdp_access_from_the_internet_allowed/compute_firewall_rdp_access_from_the_internet_allowed.metadata.json index 559fd491..51307ba3 100644 --- a/prowler/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/compute_rdp_access_from_the_internet_allowed.metadata.json +++ b/prowler/providers/gcp/services/compute/compute_firewall_rdp_access_from_the_internet_allowed/compute_firewall_rdp_access_from_the_internet_allowed.metadata.json @@ -1,6 +1,6 @@ { "Provider": "gcp", - "CheckID": "compute_rdp_access_from_the_internet_allowed", + "CheckID": "compute_firewall_rdp_access_from_the_internet_allowed", "CheckTitle": "Ensure That RDP Access Is Restricted From the Internet", "CheckType": [], "ServiceName": "networking", diff --git a/prowler/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/compute_rdp_access_from_the_internet_allowed.py b/prowler/providers/gcp/services/compute/compute_firewall_rdp_access_from_the_internet_allowed/compute_firewall_rdp_access_from_the_internet_allowed.py similarity index 93% rename from prowler/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/compute_rdp_access_from_the_internet_allowed.py rename to prowler/providers/gcp/services/compute/compute_firewall_rdp_access_from_the_internet_allowed/compute_firewall_rdp_access_from_the_internet_allowed.py index 936e57e1..1a8091ad 100644 --- a/prowler/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/compute_rdp_access_from_the_internet_allowed.py +++ b/prowler/providers/gcp/services/compute/compute_firewall_rdp_access_from_the_internet_allowed/compute_firewall_rdp_access_from_the_internet_allowed.py @@ -2,7 +2,7 @@ from prowler.lib.check.models import Check, Check_Report_GCP from prowler.providers.gcp.services.compute.compute_client import compute_client -class compute_rdp_access_from_the_internet_allowed(Check): +class compute_firewall_rdp_access_from_the_internet_allowed(Check): def execute(self) -> Check_Report_GCP: findings = [] for firewall in compute_client.firewalls: @@ -10,6 +10,7 @@ class compute_rdp_access_from_the_internet_allowed(Check): report.project_id = firewall.project_id report.resource_id = firewall.id report.resource_name = firewall.name + report.location = compute_client.region report.status = "PASS" report.status_extended = f"Firewall {firewall.name} does not expose port 3389 (RDP) to the internet." 
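# opened_port is expected to record whether any allow rule on this firewall
# exposes TCP 3389 to an unrestricted source range such as 0.0.0.0/0; the
# rule-matching logic itself sits outside this hunk.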
opened_port = False diff --git a/prowler/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/__init__.py b/prowler/providers/gcp/services/compute/compute_firewall_ssh_access_from_the_internet_allowed/__init__.py similarity index 100% rename from prowler/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/__init__.py rename to prowler/providers/gcp/services/compute/compute_firewall_ssh_access_from_the_internet_allowed/__init__.py diff --git a/prowler/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/compute_ssh_access_from_the_internet_allowed.metadata.json b/prowler/providers/gcp/services/compute/compute_firewall_ssh_access_from_the_internet_allowed/compute_firewall_ssh_access_from_the_internet_allowed.metadata.json similarity index 97% rename from prowler/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/compute_ssh_access_from_the_internet_allowed.metadata.json rename to prowler/providers/gcp/services/compute/compute_firewall_ssh_access_from_the_internet_allowed/compute_firewall_ssh_access_from_the_internet_allowed.metadata.json index 6d293323..e903b0fc 100644 --- a/prowler/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/compute_ssh_access_from_the_internet_allowed.metadata.json +++ b/prowler/providers/gcp/services/compute/compute_firewall_ssh_access_from_the_internet_allowed/compute_firewall_ssh_access_from_the_internet_allowed.metadata.json @@ -1,6 +1,6 @@ { "Provider": "gcp", - "CheckID": "compute_ssh_access_from_the_internet_allowed", + "CheckID": "compute_firewall_ssh_access_from_the_internet_allowed", "CheckTitle": "Ensure That SSH Access Is Restricted From the Internet", "CheckType": [], "ServiceName": "networking", diff --git a/prowler/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/compute_ssh_access_from_the_internet_allowed.py b/prowler/providers/gcp/services/compute/compute_firewall_ssh_access_from_the_internet_allowed/compute_firewall_ssh_access_from_the_internet_allowed.py similarity index 93% rename from prowler/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/compute_ssh_access_from_the_internet_allowed.py rename to prowler/providers/gcp/services/compute/compute_firewall_ssh_access_from_the_internet_allowed/compute_firewall_ssh_access_from_the_internet_allowed.py index fa530ce9..1020d600 100644 --- a/prowler/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/compute_ssh_access_from_the_internet_allowed.py +++ b/prowler/providers/gcp/services/compute/compute_firewall_ssh_access_from_the_internet_allowed/compute_firewall_ssh_access_from_the_internet_allowed.py @@ -2,7 +2,7 @@ from prowler.lib.check.models import Check, Check_Report_GCP from prowler.providers.gcp.services.compute.compute_client import compute_client -class compute_ssh_access_from_the_internet_allowed(Check): +class compute_firewall_ssh_access_from_the_internet_allowed(Check): def execute(self) -> Check_Report_GCP: findings = [] for firewall in compute_client.firewalls: @@ -10,6 +10,7 @@ class compute_ssh_access_from_the_internet_allowed(Check): report.project_id = firewall.project_id report.resource_id = firewall.id report.resource_name = firewall.name + report.location = compute_client.region report.status = "PASS" report.status_extended = f"Firewall {firewall.name} does not expose port 22 (SSH) to the internet." 
opened_port = False diff --git a/prowler/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/__init__.py b/prowler/providers/gcp/services/compute/compute_instance_block_project_wide_ssh_keys_disabled/__init__.py similarity index 100% rename from prowler/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/__init__.py rename to prowler/providers/gcp/services/compute/compute_instance_block_project_wide_ssh_keys_disabled/__init__.py diff --git a/prowler/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/compute_block_project_wide_ssh_keys_disabled.metadata.json b/prowler/providers/gcp/services/compute/compute_instance_block_project_wide_ssh_keys_disabled/compute_instance_block_project_wide_ssh_keys_disabled.metadata.json similarity index 95% rename from prowler/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/compute_block_project_wide_ssh_keys_disabled.metadata.json rename to prowler/providers/gcp/services/compute/compute_instance_block_project_wide_ssh_keys_disabled/compute_instance_block_project_wide_ssh_keys_disabled.metadata.json index 218225bc..76219675 100644 --- a/prowler/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/compute_block_project_wide_ssh_keys_disabled.metadata.json +++ b/prowler/providers/gcp/services/compute/compute_instance_block_project_wide_ssh_keys_disabled/compute_instance_block_project_wide_ssh_keys_disabled.metadata.json @@ -1,6 +1,6 @@ { "Provider": "gcp", - "CheckID": "compute_block_project_wide_ssh_keys_disabled", + "CheckID": "compute_instance_block_project_wide_ssh_keys_disabled", "CheckTitle": "Ensure “Block Project-Wide SSH Keys” Is Enabled for VM Instances", "CheckType": [], "ServiceName": "compute", diff --git a/prowler/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/compute_block_project_wide_ssh_keys_disabled.py b/prowler/providers/gcp/services/compute/compute_instance_block_project_wide_ssh_keys_disabled/compute_instance_block_project_wide_ssh_keys_disabled.py similarity index 94% rename from prowler/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/compute_block_project_wide_ssh_keys_disabled.py rename to prowler/providers/gcp/services/compute/compute_instance_block_project_wide_ssh_keys_disabled/compute_instance_block_project_wide_ssh_keys_disabled.py index 3ad6a106..3e093fcb 100644 --- a/prowler/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/compute_block_project_wide_ssh_keys_disabled.py +++ b/prowler/providers/gcp/services/compute/compute_instance_block_project_wide_ssh_keys_disabled/compute_instance_block_project_wide_ssh_keys_disabled.py @@ -2,7 +2,7 @@ from prowler.lib.check.models import Check, Check_Report_GCP from prowler.providers.gcp.services.compute.compute_client import compute_client -class compute_block_project_wide_ssh_keys_disabled(Check): +class compute_instance_block_project_wide_ssh_keys_disabled(Check): def execute(self) -> Check_Report_GCP: findings = [] for instance in compute_client.instances: diff --git a/prowler/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/__init__.py b/prowler/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/__init__.py similarity index 100% rename from prowler/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/__init__.py rename to prowler/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/__init__.py diff --git 
a/prowler/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled.metadata.json b/prowler/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled.metadata.json new file mode 100644 index 00000000..e36b3336 --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "compute_instance_confidential_computing_enabled", + "CheckTitle": "Ensure Compute Instances Have Confidential Computing Enabled", + "CheckType": [], + "ServiceName": "compute", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "VMInstance", + "Description": "Ensure that the Confidential Computing security feature is enabled for your Google Cloud virtual machine (VM) instances in order to add protection to your sensitive data in use by keeping it encrypted in memory and using encryption keys that Google doesn't have access to. Confidential Computing is a breakthrough technology which encrypts data while it is being processed. This technology keeps data encrypted in memory, outside the CPU.", + "Risk": "Confidential Computing keeps your sensitive data encrypted while it is used, indexed, queried, or trained on, and does not allow Google to access the encryption keys (these keys are generated in hardware, per VM instance, and can't be exported). In this way, the Confidential Computing feature can help alleviate concerns about risk related to either dependency on Google Cloud infrastructure or Google insiders' access to your data in the clear.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/ComputeEngine/confidential-computing.html", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure that the Confidential Computing security feature is enabled for your Google Cloud virtual machine (VM) instances in order to add protection to your sensitive data in use by keeping it encrypted in memory and using encryption keys that Google doesn't have access to. Confidential Computing is a breakthrough technology which encrypts data while it is being processed. 
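For context, a minimal sketch of where this flag lives in the Compute API, assuming `googleapiclient` and placeholder project/zone/instance names; the new check below reads the same field from the service class instead of calling the API directly.

```python
from googleapiclient import discovery

# Placeholder names; an absent confidentialInstanceConfig means the
# feature is off, which the check below reports as FAIL.
compute = discovery.build("compute", "v1")
instance = (
    compute.instances()
    .get(project="my-project", zone="us-central1-a", instance="my-vm")
    .execute()
)
enabled = instance.get("confidentialInstanceConfig", {}).get(
    "enableConfidentialCompute", False
)
print(f"Confidential Computing enabled: {enabled}")
```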
This technology keeps data encrypted in memory, outside the CPU.", + "Url": "https://cloud.google.com/compute/confidential-vm/docs/creating-cvm-instance:https://cloud.google.com/compute/confidential-vm/docs/about-cvm:https://cloud.google.com/confidential-computing:https://cloud.google.com/blog/products/identity-security/introducing-google-cloud-confidential-computing-with-confidential-vms" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled.py b/prowler/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled.py new file mode 100644 index 00000000..c35fe3c5 --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled.py @@ -0,0 +1,23 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.compute.compute_client import compute_client + + +class compute_instance_confidential_computing_enabled(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for instance in compute_client.instances: + report = Check_Report_GCP(self.metadata()) + report.project_id = instance.project_id + report.resource_id = instance.id + report.resource_name = instance.name + report.location = instance.zone + report.status = "PASS" + report.status_extended = ( + f"VM Instance {instance.name} has Confidential Computing enabled" + ) + if not instance.confidential_computing: + report.status = "FAIL" + report.status_extended = f"VM Instance {instance.name} does not have Confidential Computing enabled" + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/compute/compute_serial_ports_in_use/__init__.py b/prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use/__init__.py similarity index 100% rename from prowler/providers/gcp/services/compute/compute_serial_ports_in_use/__init__.py rename to prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use/__init__.py diff --git a/prowler/providers/gcp/services/compute/compute_default_service_account_in_use/compute_default_service_account_in_use.metadata.json b/prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use/compute_instance_default_service_account_in_use.metadata.json similarity index 96% rename from prowler/providers/gcp/services/compute/compute_default_service_account_in_use/compute_default_service_account_in_use.metadata.json rename to prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use/compute_instance_default_service_account_in_use.metadata.json index 25b19dbc..e1d8aa15 100644 --- a/prowler/providers/gcp/services/compute/compute_default_service_account_in_use/compute_default_service_account_in_use.metadata.json +++ b/prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use/compute_instance_default_service_account_in_use.metadata.json @@ -1,6 +1,6 @@ { "Provider": "gcp", - "CheckID": "compute_default_service_account_in_use", + "CheckID": "compute_instance_default_service_account_in_use", "CheckTitle": "Ensure That Instances Are Not Configured To Use the Default Service Account", "CheckType": [], "ServiceName": "compute", diff --git 
a/prowler/providers/gcp/services/compute/compute_default_service_account_in_use/compute_default_service_account_in_use.py b/prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use/compute_instance_default_service_account_in_use.py similarity index 95% rename from prowler/providers/gcp/services/compute/compute_default_service_account_in_use/compute_default_service_account_in_use.py rename to prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use/compute_instance_default_service_account_in_use.py index ad7a4dfa..0f210cbb 100644 --- a/prowler/providers/gcp/services/compute/compute_default_service_account_in_use/compute_default_service_account_in_use.py +++ b/prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use/compute_instance_default_service_account_in_use.py @@ -2,7 +2,7 @@ from prowler.lib.check.models import Check, Check_Report_GCP from prowler.providers.gcp.services.compute.compute_client import compute_client -class compute_default_service_account_in_use(Check): +class compute_instance_default_service_account_in_use(Check): def execute(self) -> Check_Report_GCP: findings = [] for instance in compute_client.instances: diff --git a/prowler/providers/gcp/services/compute/compute_shielded_vm_enabled/__init__.py b/prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use_with_full_api_access/__init__.py similarity index 100% rename from prowler/providers/gcp/services/compute/compute_shielded_vm_enabled/__init__.py rename to prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use_with_full_api_access/__init__.py diff --git a/prowler/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/compute_default_service_account_in_use_with_full_api_access.metadata.json b/prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use_with_full_api_access/compute_instance_default_service_account_in_use_with_full_api_access.metadata.json similarity index 95% rename from prowler/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/compute_default_service_account_in_use_with_full_api_access.metadata.json rename to prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use_with_full_api_access/compute_instance_default_service_account_in_use_with_full_api_access.metadata.json index ddf636c8..513bcc0f 100644 --- a/prowler/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/compute_default_service_account_in_use_with_full_api_access.metadata.json +++ b/prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use_with_full_api_access/compute_instance_default_service_account_in_use_with_full_api_access.metadata.json @@ -1,6 +1,6 @@ { "Provider": "gcp", - "CheckID": "compute_default_service_account_in_use_with_full_api_access", + "CheckID": "compute_instance_default_service_account_in_use_with_full_api_access", "CheckTitle": "Ensure That Instances Are Not Configured To Use the Default Service Account With Full Access to All Cloud APIs", "CheckType": [], "ServiceName": "compute", diff --git a/prowler/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/compute_default_service_account_in_use_with_full_api_access.py 
b/prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use_with_full_api_access/compute_instance_default_service_account_in_use_with_full_api_access.py similarity index 94% rename from prowler/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/compute_default_service_account_in_use_with_full_api_access.py rename to prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use_with_full_api_access/compute_instance_default_service_account_in_use_with_full_api_access.py index 1723c25d..02e80439 100644 --- a/prowler/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/compute_default_service_account_in_use_with_full_api_access.py +++ b/prowler/providers/gcp/services/compute/compute_instance_default_service_account_in_use_with_full_api_access/compute_instance_default_service_account_in_use_with_full_api_access.py @@ -2,7 +2,7 @@ from prowler.lib.check.models import Check, Check_Report_GCP from prowler.providers.gcp.services.compute.compute_client import compute_client -class compute_default_service_account_in_use_with_full_api_access(Check): +class compute_instance_default_service_account_in_use_with_full_api_access(Check): def execute(self) -> Check_Report_GCP: findings = [] for instance in compute_client.instances: diff --git a/prowler/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/__init__.py b/prowler/providers/gcp/services/compute/compute_instance_encryption_with_csek_enabled/__init__.py similarity index 100% rename from prowler/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/__init__.py rename to prowler/providers/gcp/services/compute/compute_instance_encryption_with_csek_enabled/__init__.py diff --git a/prowler/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/compute_encryption_with_csek_is_disabled.metadata.json b/prowler/providers/gcp/services/compute/compute_instance_encryption_with_csek_enabled/compute_instance_encryption_with_csek_enabled.metadata.json similarity index 97% rename from prowler/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/compute_encryption_with_csek_is_disabled.metadata.json rename to prowler/providers/gcp/services/compute/compute_instance_encryption_with_csek_enabled/compute_instance_encryption_with_csek_enabled.metadata.json index 23ac41e1..1fdc8e5c 100644 --- a/prowler/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/compute_encryption_with_csek_is_disabled.metadata.json +++ b/prowler/providers/gcp/services/compute/compute_instance_encryption_with_csek_enabled/compute_instance_encryption_with_csek_enabled.metadata.json @@ -1,6 +1,6 @@ { "Provider": "gcp", - "CheckID": "compute_encryption_with_csek_is_disabled", + "CheckID": "compute_instance_encryption_with_csek_enabled", "CheckTitle": "Ensure VM Disks for Critical VMs Are Encrypted With Customer-Supplied Encryption Keys (CSEK)", "CheckType": [], "ServiceName": "compute", diff --git a/prowler/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/compute_encryption_with_csek_is_disabled.py b/prowler/providers/gcp/services/compute/compute_instance_encryption_with_csek_enabled/compute_instance_encryption_with_csek_enabled.py similarity index 76% rename from prowler/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/compute_encryption_with_csek_is_disabled.py rename to 
prowler/providers/gcp/services/compute/compute_instance_encryption_with_csek_enabled/compute_instance_encryption_with_csek_enabled.py index 184042d0..ff0e0744 100644 --- a/prowler/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/compute_encryption_with_csek_is_disabled.py +++ b/prowler/providers/gcp/services/compute/compute_instance_encryption_with_csek_enabled/compute_instance_encryption_with_csek_enabled.py @@ -2,7 +2,7 @@ from prowler.lib.check.models import Check, Check_Report_GCP from prowler.providers.gcp.services.compute.compute_client import compute_client -class compute_encryption_with_csek_is_disabled(Check): +class compute_instance_encryption_with_csek_enabled(Check): def execute(self) -> Check_Report_GCP: findings = [] for instance in compute_client.instances: @@ -12,11 +12,11 @@ class compute_encryption_with_csek_is_disabled(Check): report.resource_name = instance.name report.location = instance.zone report.status = "FAIL" - report.status_extended = f"The VM Instance {instance.name} have the following unencrypted disks: '{', '.join([i[0] for i in instance.disks_encryption if not i[1]])}'" + report.status_extended = f"The VM Instance {instance.name} has the following unencrypted disks: '{', '.join([i[0] for i in instance.disks_encryption if not i[1]])}'" if all([i[1] for i in instance.disks_encryption]): report.status = "PASS" report.status_extended = ( - f"The VM Instance {instance.name} have every disk encrypted." + f"The VM Instance {instance.name} has every disk encrypted." ) findings.append(report) diff --git a/prowler/providers/gcp/services/compute/compute_instance_ip_forwarding_is_enabled/__init__.py b/prowler/providers/gcp/services/compute/compute_instance_ip_forwarding_is_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/compute_ip_forwarding_is_enabled.metadata.json b/prowler/providers/gcp/services/compute/compute_instance_ip_forwarding_is_enabled/compute_instance_ip_forwarding_is_enabled.metadata.json similarity index 97% rename from prowler/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/compute_ip_forwarding_is_enabled.metadata.json rename to prowler/providers/gcp/services/compute/compute_instance_ip_forwarding_is_enabled/compute_instance_ip_forwarding_is_enabled.metadata.json index 5483cb1b..58f2045f 100644 --- a/prowler/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/compute_ip_forwarding_is_enabled.metadata.json +++ b/prowler/providers/gcp/services/compute/compute_instance_ip_forwarding_is_enabled/compute_instance_ip_forwarding_is_enabled.metadata.json @@ -1,6 +1,6 @@ { "Provider": "gcp", - "CheckID": "compute_ip_forwarding_is_enabled", + "CheckID": "compute_instance_ip_forwarding_is_enabled", "CheckTitle": "Ensure That IP Forwarding Is Not Enabled on Instances", "CheckType": [], "ServiceName": "compute", diff --git a/prowler/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/compute_ip_forwarding_is_enabled.py b/prowler/providers/gcp/services/compute/compute_instance_ip_forwarding_is_enabled/compute_instance_ip_forwarding_is_enabled.py similarity index 94% rename from prowler/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/compute_ip_forwarding_is_enabled.py rename to prowler/providers/gcp/services/compute/compute_instance_ip_forwarding_is_enabled/compute_instance_ip_forwarding_is_enabled.py index dddcbb69..94e8b804 100644 --- 
a/prowler/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/compute_ip_forwarding_is_enabled.py +++ b/prowler/providers/gcp/services/compute/compute_instance_ip_forwarding_is_enabled/compute_instance_ip_forwarding_is_enabled.py @@ -2,7 +2,7 @@ from prowler.lib.check.models import Check, Check_Report_GCP from prowler.providers.gcp.services.compute.compute_client import compute_client -class compute_ip_forwarding_is_enabled(Check): +class compute_instance_ip_forwarding_is_enabled(Check): def execute(self) -> Check_Report_GCP: findings = [] for instance in compute_client.instances: diff --git a/prowler/providers/gcp/services/compute/compute_instance_serial_ports_in_use/__init__.py b/prowler/providers/gcp/services/compute/compute_instance_serial_ports_in_use/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/compute/compute_serial_ports_in_use/compute_serial_ports_in_use.metadata.json b/prowler/providers/gcp/services/compute/compute_instance_serial_ports_in_use/compute_instance_serial_ports_in_use.metadata.json similarity index 97% rename from prowler/providers/gcp/services/compute/compute_serial_ports_in_use/compute_serial_ports_in_use.metadata.json rename to prowler/providers/gcp/services/compute/compute_instance_serial_ports_in_use/compute_instance_serial_ports_in_use.metadata.json index 6a123b2e..147af062 100644 --- a/prowler/providers/gcp/services/compute/compute_serial_ports_in_use/compute_serial_ports_in_use.metadata.json +++ b/prowler/providers/gcp/services/compute/compute_instance_serial_ports_in_use/compute_instance_serial_ports_in_use.metadata.json @@ -1,6 +1,6 @@ { "Provider": "gcp", - "CheckID": "compute_serial_ports_in_use", + "CheckID": "compute_instance_serial_ports_in_use", "CheckTitle": "Ensure ‘Enable Connecting to Serial Ports’ Is Not Enabled for VM Instance", "CheckType": [], "ServiceName": "compute", diff --git a/prowler/providers/gcp/services/compute/compute_serial_ports_in_use/compute_serial_ports_in_use.py b/prowler/providers/gcp/services/compute/compute_instance_serial_ports_in_use/compute_instance_serial_ports_in_use.py similarity index 79% rename from prowler/providers/gcp/services/compute/compute_serial_ports_in_use/compute_serial_ports_in_use.py rename to prowler/providers/gcp/services/compute/compute_instance_serial_ports_in_use/compute_instance_serial_ports_in_use.py index d44d21f5..ca3cdb87 100644 --- a/prowler/providers/gcp/services/compute/compute_serial_ports_in_use/compute_serial_ports_in_use.py +++ b/prowler/providers/gcp/services/compute/compute_instance_serial_ports_in_use/compute_instance_serial_ports_in_use.py @@ -2,7 +2,7 @@ from prowler.lib.check.models import Check, Check_Report_GCP from prowler.providers.gcp.services.compute.compute_client import compute_client -class compute_serial_ports_in_use(Check): +class compute_instance_serial_ports_in_use(Check): def execute(self) -> Check_Report_GCP: findings = [] for instance in compute_client.instances: @@ -12,7 +12,9 @@ class compute_serial_ports_in_use(Check): report.resource_name = instance.name report.location = instance.zone report.status = "PASS" - report.status_extended = f"VM Instance {instance.name} have ‘Enable Connecting to Serial Ports’ off" + report.status_extended = ( + f"VM Instance {instance.name} has Enable Connecting to Serial Ports off" + ) if instance.metadata.get("items"): for item in instance.metadata["items"]: if item["key"] == "serial-port-enable" and item["value"] in [ @@ -20,7 +22,7 @@ class 
compute_serial_ports_in_use(Check): "true", ]: report.status = "FAIL" - report.status_extended = f"VM Instance {instance.name} have ‘Enable Connecting to Serial Ports’ set to on" + report.status_extended = f"VM Instance {instance.name} has Enable Connecting to Serial Ports set to on" break findings.append(report) diff --git a/prowler/providers/gcp/services/compute/compute_instance_shielded_vm_enabled/__init__.py b/prowler/providers/gcp/services/compute/compute_instance_shielded_vm_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/compute/compute_shielded_vm_enabled/compute_shielded_vm_enabled.metadata.json b/prowler/providers/gcp/services/compute/compute_instance_shielded_vm_enabled/compute_instance_shielded_vm_enabled.metadata.json similarity index 97% rename from prowler/providers/gcp/services/compute/compute_shielded_vm_enabled/compute_shielded_vm_enabled.metadata.json rename to prowler/providers/gcp/services/compute/compute_instance_shielded_vm_enabled/compute_instance_shielded_vm_enabled.metadata.json index 9b368686..305fa6d3 100644 --- a/prowler/providers/gcp/services/compute/compute_shielded_vm_enabled/compute_shielded_vm_enabled.metadata.json +++ b/prowler/providers/gcp/services/compute/compute_instance_shielded_vm_enabled/compute_instance_shielded_vm_enabled.metadata.json @@ -1,6 +1,6 @@ { "Provider": "gcp", - "CheckID": "compute_shielded_vm_enabled", + "CheckID": "compute_instance_shielded_vm_enabled", "CheckTitle": "Ensure Compute Instances Are Launched With Shielded VM Enabled", "CheckType": [], "ServiceName": "compute", diff --git a/prowler/providers/gcp/services/compute/compute_shielded_vm_enabled/compute_shielded_vm_enabled.py b/prowler/providers/gcp/services/compute/compute_instance_shielded_vm_enabled/compute_instance_shielded_vm_enabled.py similarity index 85% rename from prowler/providers/gcp/services/compute/compute_shielded_vm_enabled/compute_shielded_vm_enabled.py rename to prowler/providers/gcp/services/compute/compute_instance_shielded_vm_enabled/compute_instance_shielded_vm_enabled.py index 5d4e3226..9db85d6c 100644 --- a/prowler/providers/gcp/services/compute/compute_shielded_vm_enabled/compute_shielded_vm_enabled.py +++ b/prowler/providers/gcp/services/compute/compute_instance_shielded_vm_enabled/compute_instance_shielded_vm_enabled.py @@ -2,7 +2,7 @@ from prowler.lib.check.models import Check, Check_Report_GCP from prowler.providers.gcp.services.compute.compute_client import compute_client -class compute_shielded_vm_enabled(Check): +class compute_instance_shielded_vm_enabled(Check): def execute(self) -> Check_Report_GCP: findings = [] for instance in compute_client.instances: @@ -12,13 +12,13 @@ class compute_shielded_vm_enabled(Check): report.resource_name = instance.name report.location = instance.zone report.status = "PASS" - report.status_extended = f"VM Instance {instance.name} have vTPM or Integrity Monitoring set to on" + report.status_extended = f"VM Instance {instance.name} has vTPM or Integrity Monitoring set to on" if ( not instance.shielded_enabled_vtpm or not instance.shielded_enabled_integrity_monitoring ): report.status = "FAIL" - report.status_extended = f"VM Instance {instance.name} don't have vTPM and Integrity Monitoring set to on" + report.status_extended = f"VM Instance {instance.name} doesn't have vTPM and Integrity Monitoring set to on" findings.append(report) return findings diff --git a/prowler/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/__init__.py 
b/prowler/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/compute_loadbalancer_logging_enabled.metadata.json b/prowler/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/compute_loadbalancer_logging_enabled.metadata.json new file mode 100644 index 00000000..2a3cca53 --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/compute_loadbalancer_logging_enabled.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "compute_loadbalancer_logging_enabled", + "CheckTitle": "Ensure Logging is enabled for HTTP(S) Load Balancer", + "CheckType": [], + "ServiceName": "compute", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "LoadBalancer", + "Description": "Logging enabled on a HTTPS Load Balancer will show all network traffic and its destination.", + "Risk": "HTTP(S) load balancing log entries contain information useful for monitoring and debugging web traffic. Google Cloud exports this logging data to Cloud Monitoring service so that monitoring metrics can be created to evaluate a load balancer's configuration, usage, and performance, troubleshoot problems, and improve resource utilization and user experience.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "gcloud compute backend-services update --region=REGION --enable-logging --logging-sample-rate=", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudLoadBalancing/enableLoad-balancing-backend-service-logging.html", + "Terraform": "" + }, + "Recommendation": { + "Text": "Logging will allow you to view HTTPS network traffic to your web applications.", + "Url": "https://cloud.google.com/load-balancing/docs/https/https-logging-monitoring#gcloud:-global-mode" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/compute_loadbalancer_logging_enabled.py b/prowler/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/compute_loadbalancer_logging_enabled.py new file mode 100644 index 00000000..b8020e5e --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/compute_loadbalancer_logging_enabled.py @@ -0,0 +1,23 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.compute.compute_client import compute_client + + +class compute_loadbalancer_logging_enabled(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for lb in compute_client.load_balancers: + report = Check_Report_GCP(self.metadata()) + report.project_id = lb.project_id + report.resource_id = lb.id + report.resource_name = lb.name + report.location = compute_client.region + report.status = "PASS" + report.status_extended = f"LoadBalancer {lb.name} has logging enabled" + if not lb.logging: + report.status = "FAIL" + report.status_extended = ( + f"LoadBalancer {lb.name} does not have logging enabled" + ) + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/compute/compute_network_dns_logging_enabled/__init__.py b/prowler/providers/gcp/services/compute/compute_network_dns_logging_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/prowler/providers/gcp/services/compute/compute_network_dns_logging_enabled/compute_network_dns_logging_enabled.metadata.json b/prowler/providers/gcp/services/compute/compute_network_dns_logging_enabled/compute_network_dns_logging_enabled.metadata.json new file mode 100644 index 00000000..924cd4f5 --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_network_dns_logging_enabled/compute_network_dns_logging_enabled.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "compute_network_dns_logging_enabled", + "CheckTitle": "Enable Cloud DNS Logging for VPC Networks", + "CheckType": [], + "ServiceName": "compute", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "Network", + "Description": "Ensure that Cloud DNS logging is enabled for all your Virtual Private Cloud (VPC) networks using DNS server policies. Cloud DNS logging records queries that the name servers resolve for your Google Cloud VPC networks, as well as queries from external entities directly to a public DNS zone. Recorded queries can come from virtual machine (VM) instances, GKE containers running in the same VPC network, peering zones, or other Google Cloud resources provisioned within your VPC.", + "Risk": "Cloud DNS logging is disabled by default on each Google Cloud VPC network. By enabling monitoring of Cloud DNS logs, you can increase visibility into the DNS names requested by the clients within your VPC network. Cloud DNS logs can be monitored for anomalous domain names and evaluated against threat intelligence.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudVPC/dns-logging-for-vpcs.html", + "Terraform": "" + }, + "Recommendation": { + "Text": "Cloud DNS logging records the queries from the name servers within your VPC to Stackdriver. 
Logged queries can come from Compute Engine VMs, GKE containers, or other GCP resources provisioned within the VPC.", + "Url": "https://cloud.google.com/dns/docs/monitoring" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/compute/compute_network_dns_logging_enabled/compute_network_dns_logging_enabled.py b/prowler/providers/gcp/services/compute/compute_network_dns_logging_enabled/compute_network_dns_logging_enabled.py new file mode 100644 index 00000000..5d38f1e6 --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_network_dns_logging_enabled/compute_network_dns_logging_enabled.py @@ -0,0 +1,28 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.compute.compute_client import compute_client +from prowler.providers.gcp.services.dns.dns_client import dns_client + + +class compute_network_dns_logging_enabled(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for network in compute_client.networks: + report = Check_Report_GCP(self.metadata()) + report.project_id = network.project_id + report.resource_id = network.id + report.resource_name = network.name + report.location = compute_client.region + report.status = "FAIL" + report.status_extended = ( + f"Network {network.name} does not have DNS logging enabled" + ) + for policy in dns_client.policies: + if network.name in policy.networks and policy.logging: + report.status = "PASS" + report.status_extended = ( + f"Network {network.name} has DNS logging enabled" + ) + break + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/compute/compute_network_not_legacy/__init__.py b/prowler/providers/gcp/services/compute/compute_network_not_legacy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy.metadata.json b/prowler/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy.metadata.json new file mode 100644 index 00000000..e412ff93 --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "compute_network_not_legacy", + "CheckTitle": "Ensure Legacy Networks Do Not Exist", + "CheckType": [], + "ServiceName": "compute", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "Network", + "Description": "In order to prevent use of legacy networks, a project should not have a legacy network configured. As of now, Legacy Networks are gradually being phased out, and you can no longer create projects with them. This recommendation is to check older projects to ensure that they are not using Legacy Networks.", + "Risk": "Google Cloud legacy networks have a single global IPv4 range which cannot be divided into subnets, and a single gateway IP address for the whole network. Legacy networks do not support several Google Cloud networking features such as subnets, alias IP ranges, multiple network interfaces, Cloud NAT (Network Address Translation), Virtual Private Cloud (VPC) Peering, and private access options for GCP services. 
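The detection relies on a quirk of the Compute API: legacy networks carry no `autoCreateSubnetworks` field at all. A standalone sketch of the classification that `compute_service.py` applies later in this diff:

```python
# Legacy networks have no autoCreateSubnetworks key; auto-mode networks
# have it set to True and custom-mode networks to False.
def classify_subnet_mode(network: dict) -> str:
    if "autoCreateSubnetworks" not in network:
        return "legacy"
    return "auto" if network["autoCreateSubnetworks"] else "custom"

assert classify_subnet_mode({"name": "old-net"}) == "legacy"
assert classify_subnet_mode({"autoCreateSubnetworks": True}) == "auto"
assert classify_subnet_mode({"autoCreateSubnetworks": False}) == "custom"
```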
Legacy networks are not recommended for high network traffic projects and are subject to a single point of contention or failure.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "gcloud compute networks delete ", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudVPC/legacy-vpc-in-use.html#", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-legacy-networks-do-not-exist-for-a-project#terraform" + }, + "Recommendation": { + "Text": "Ensure that your Google Cloud Platform (GCP) projects are not using legacy networks as this type of network is no longer recommended for production environments because it does not support advanced networking features. Instead, it is strongly recommended to use Virtual Private Cloud (VPC) networks for existing and future GCP projects.", + "Url": "https://cloud.google.com/vpc/docs/using-legacy#deleting_a_legacy_network" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy.py b/prowler/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy.py new file mode 100644 index 00000000..64e5db39 --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy.py @@ -0,0 +1,21 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.compute.compute_client import compute_client + + +class compute_network_not_legacy(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for network in compute_client.networks: + report = Check_Report_GCP(self.metadata()) + report.project_id = network.project_id + report.resource_id = network.id + report.resource_name = network.name + report.location = compute_client.region + report.status = "PASS" + report.status_extended = f"Network {network.name} is not legacy" + if network.subnet_mode == "legacy": + report.status = "FAIL" + report.status_extended = f"Legacy network {network.name} exists" + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/compute/compute_project_os_login_enabled/__init__.py b/prowler/providers/gcp/services/compute/compute_project_os_login_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/compute/compute_project_os_login_enabled/compute_project_os_login_enabled.metadata.json b/prowler/providers/gcp/services/compute/compute_project_os_login_enabled/compute_project_os_login_enabled.metadata.json new file mode 100644 index 00000000..ad1fd1cc --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_project_os_login_enabled/compute_project_os_login_enabled.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "compute_project_os_login_enabled", + "CheckTitle": "Ensure Os Login Is Enabled for a Project", + "CheckType": [], + "ServiceName": "compute", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "low", + "ResourceType": "Project", + "Description": "Ensure that the OS Login feature is enabled at the Google Cloud Platform (GCP) project level in order to provide you with centralized and automated SSH key pair management.", + "Risk": "Enabling OS Login feature ensures that the SSH keys used to connect to VM instances are mapped with Google Cloud IAM users. 
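The project-wide flag lives in the project's `commonInstanceMetadata`; a minimal sketch mirroring the lookup that the new `__get_projects__` helper in `compute_service.py` performs (the API reports the value as the string `"TRUE"`):

```python
# project_resource is the dict returned by compute.projects().get(...).
def oslogin_enabled(project_resource: dict) -> bool:
    items = project_resource.get("commonInstanceMetadata", {}).get("items", [])
    return any(
        item["key"] == "enable-oslogin" and item["value"] == "TRUE"
        for item in items
    )
```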
Revoking access to corresponding IAM users will revoke all the SSH keys associated with these users, therefore it facilitates centralized SSH key pair management, which is extremely useful in handling compromised or stolen SSH key pairs and/or revocation of external/third-party/vendor users.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "https://docs.bridgecrew.io/docs/bc_gcp_networking_9#cli-command", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/ComputeEngine/enable-os-login.html", + "Terraform": "https://docs.bridgecrew.io/docs/bc_gcp_networking_9#terraform" + }, + "Recommendation": { + "Text": "Ensure that the OS Login feature is enabled at the Google Cloud Platform (GCP) project level in order to provide you with centralized and automated SSH key pair management.", + "Url": "https://cloud.google.com/compute/confidential-vm/docs/creating-cvm-instance:https://cloud.google.com/compute/confidential-vm/docs/about-cvm:https://cloud.google.com/confidential-computing:https://cloud.google.com/blog/products/identity-security/introducing-google-cloud-confidential-computing-with-confidential-vms" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/compute/compute_project_os_login_enabled/compute_project_os_login_enabled.py b/prowler/providers/gcp/services/compute/compute_project_os_login_enabled/compute_project_os_login_enabled.py new file mode 100644 index 00000000..37a2f62b --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_project_os_login_enabled/compute_project_os_login_enabled.py @@ -0,0 +1,22 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.compute.compute_client import compute_client + + +class compute_project_os_login_enabled(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for project in compute_client.projects: + report = Check_Report_GCP(self.metadata()) + report.project_id = project.id + report.resource_id = project.id + report.location = "global" + report.status = "PASS" + report.status_extended = f"Project {project.id} has OS Login enabled" + if not project.enable_oslogin: + report.status = "FAIL" + report.status_extended = ( + f"Project {project.id} does not have OS Login enabled" + ) + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/compute/compute_service.py b/prowler/providers/gcp/services/compute/compute_service.py index 1252eaa4..a59d306e 100644 --- a/prowler/providers/gcp/services/compute/compute_service.py +++ b/prowler/providers/gcp/services/compute/compute_service.py @@ -12,15 +12,23 @@ class Compute: self.project_ids = audit_info.project_ids self.default_project_id = audit_info.default_project_id self.client = generate_client(self.service, self.api_version, audit_info) + self.region = "global" self.regions = set() self.zones = set() self.instances = [] self.networks = [] + self.subnets = [] self.firewalls = [] + self.projects = [] + self.load_balancers = [] + self.__get_url_maps__() + self.__describe_backend_service__() self.__get_regions__() + self.__get_projects__() self.__get_zones__() self.__get_instances__() self.__get_networks__() + self.__get_subnetworks__() self.__get_firewalls__() def __get_regions__(self): @@ -59,6 +67,22 @@ class Compute: f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) + def __get_projects__(self): + for project_id in self.project_ids: + try: + 
enable_oslogin = False + response = self.client.projects().get(project=project_id).execute() + for item in response["commonInstanceMetadata"].get("items", []): + if item["key"] == "enable-oslogin" and item["value"] == "TRUE": + enable_oslogin = True + self.projects.append( + Project(id=project_id, enable_oslogin=enable_oslogin) + ) + except Exception as error: + logger.error( + f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + def __get_instances__(self): for project_id in self.project_ids: try: @@ -88,6 +112,9 @@ class Compute: shielded_enabled_integrity_monitoring=instance[ "shieldedInstanceConfig" ]["enableIntegrityMonitoring"], + confidential_computing=instance[ + "confidentialInstanceConfig" + ]["enableConfidentialCompute"], service_accounts=instance["serviceAccounts"], ip_forward=instance.get("canIpForward", False), disks_encryption=[ @@ -120,10 +147,18 @@ class Compute: while request is not None: response = request.execute() for network in response.get("items", []): + subnet_mode = ( + "legacy" + if "autoCreateSubnetworks" not in network + else "auto" + if network["autoCreateSubnetworks"] + else "custom" + ) self.networks.append( Network( name=network["name"], id=network["id"], + subnet_mode=subnet_mode, project_id=project_id, ) ) @@ -136,6 +171,35 @@ class Compute: f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) + def __get_subnetworks__(self): + for project_id in self.project_ids: + try: + for region in self.regions: + request = self.client.subnetworks().list( + project=project_id, region=region + ) + while request is not None: + response = request.execute() + for subnet in response.get("items", []): + self.subnets.append( + Subnet( + name=subnet["name"], + id=subnet["id"], + project_id=project_id, + flow_logs=subnet.get("enableFlowLogs", False), + network=subnet["network"].split("/")[-1], + region=region, + ) + ) + + request = self.client.subnetworks().list_next( + previous_request=request, previous_response=response + ) + except Exception as error: + logger.error( + f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + def __get_firewalls__(self): for project_id in self.project_ids: try: @@ -163,6 +227,47 @@ class Compute: f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) + def __get_url_maps__(self): + for project_id in self.project_ids: + try: + request = self.client.urlMaps().list(project=project_id) + while request is not None: + response = request.execute() + for urlmap in response.get("items", []): + self.load_balancers.append( + LoadBalancer( + name=urlmap["name"], + id=urlmap["id"], + service=urlmap["defaultService"], + project_id=project_id, + ) + ) + + request = self.client.urlMaps().list_next( + previous_request=request, previous_response=response + ) + except Exception as error: + logger.error( + f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + + def __describe_backend_service__(self): + for balancer in self.load_balancers: + try: + response = ( + self.client.backendServices() + .get( + project=balancer.project_id, + backendService=balancer.service.split("/")[-1], + ) + .execute() + ) + balancer.logging = response.get("logConfig", {}).get("enable", False) + except Exception as error: + logger.error( + f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + class Instance(BaseModel): name: str @@ -173,6 +278,7 @@ class Instance(BaseModel): metadata: dict shielded_enabled_vtpm: bool
shielded_enabled_integrity_monitoring: bool + confidential_computing: bool service_accounts: list ip_forward: bool disks_encryption: list @@ -181,9 +287,19 @@ class Instance(BaseModel): class Network(BaseModel): name: str id: str + subnet_mode: str project_id: str +class Subnet(BaseModel): + name: str + id: str + network: str + project_id: str + flow_logs: bool + region: str + + class Firewall(BaseModel): name: str id: str @@ -191,3 +307,16 @@ class Firewall(BaseModel): direction: str allowed_rules: list project_id: str + + +class Project(BaseModel): + id: str + enable_oslogin: bool + + +class LoadBalancer(BaseModel): + name: str + id: str + service: str + logging: bool = False + project_id: str diff --git a/prowler/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/__init__.py b/prowler/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/compute_subnet_flow_logs_enabled.metadata.json b/prowler/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/compute_subnet_flow_logs_enabled.metadata.json new file mode 100644 index 00000000..4f64233a --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/compute_subnet_flow_logs_enabled.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "compute_subnet_flow_logs_enabled", + "CheckTitle": "Enable VPC Flow Logs for VPC Subnets", + "CheckType": [], + "ServiceName": "compute", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "Subnet", + "Description": "Ensure that VPC Flow Logs is enabled for every subnet created within your production Virtual Private Cloud (VPC) network. Flow Logs is a logging feature that enables users to capture information about the IP traffic (accepted, rejected, or all traffic) going to and from the network interfaces (ENIs) available within your VPC subnets.", + "Risk": "By default, the VPC Flow Logs feature is disabled when a new VPC network subnet is created. Once enabled, VPC Flow Logs will start collecting network traffic data to and from your Virtual Private Cloud (VPC) subnets, logging data that can be useful for understanding network usage, network traffic expense optimization, network forensics, and real-time security analysis. To enhance Google Cloud VPC network visibility and security it is strongly recommended to enable Flow Logs for every business-critical or production VPC subnet.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "gcloud compute networks subnets update [SUBNET_NAME] --region [REGION] --enable-flow-logs", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudVPC/enable-vpc-flow-logs.html", + "Terraform": "https://docs.bridgecrew.io/docs/bc_gcp_logging_1#terraform" + }, + "Recommendation": { + "Text": "Ensure that VPC Flow Logs is enabled for every subnet created within your production Virtual Private Cloud (VPC) network. 
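Once the service class has collected `Subnet` records (model defined earlier in this diff, with `enableFlowLogs` defaulting to off), ad-hoc queries become one-liners; a usage sketch:

```python
from prowler.providers.gcp.services.compute.compute_client import compute_client

# List every subnet the new flow-logs check would flag as FAIL.
missing = [
    f"{subnet.network}/{subnet.name} in {subnet.region}"
    for subnet in compute_client.subnets
    if not subnet.flow_logs
]
print("\n".join(missing) if missing else "every subnet has flow logs enabled")
```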
Flow Logs is a logging feature that enables users to capture information about the IP traffic (accepted, rejected, or all traffic) going to and from the network interfaces (ENIs) available within your VPC subnets.", + "Url": "https://cloud.google.com/vpc/docs/using-flow-logs#enabling_vpc_flow_logging" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/compute_subnet_flow_logs_enabled.py b/prowler/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/compute_subnet_flow_logs_enabled.py new file mode 100644 index 00000000..6d838132 --- /dev/null +++ b/prowler/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/compute_subnet_flow_logs_enabled.py @@ -0,0 +1,21 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.compute.compute_client import compute_client + + +class compute_subnet_flow_logs_enabled(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for subnet in compute_client.subnets: + report = Check_Report_GCP(self.metadata()) + report.project_id = subnet.project_id + report.resource_id = subnet.id + report.resource_name = subnet.name + report.location = subnet.region + report.status = "PASS" + report.status_extended = f"Subnet {subnet.name} in network {subnet.network} has flow logs enabled" + if not subnet.flow_logs: + report.status = "FAIL" + report.status_extended = f"Subnet {subnet.name} in network {subnet.network} does not have flow logs enabled" + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/dns/dns_dnssec_disabled/dns_dnssec_disabled.py b/prowler/providers/gcp/services/dns/dns_dnssec_disabled/dns_dnssec_disabled.py index 39175a08..5595f026 100644 --- a/prowler/providers/gcp/services/dns/dns_dnssec_disabled/dns_dnssec_disabled.py +++ b/prowler/providers/gcp/services/dns/dns_dnssec_disabled/dns_dnssec_disabled.py @@ -12,12 +12,12 @@ class dns_dnssec_disabled(Check): report.resource_name = managed_zone.name report.status = "PASS" report.status_extended = ( - f"Cloud DNS {managed_zone.name} have DNSSEC enabled." + f"Cloud DNS {managed_zone.name} has DNSSEC enabled." ) if not managed_zone.dnssec: report.status = "FAIL" report.status_extended = ( - f"Cloud DNS {managed_zone.name} doens't have DNSSEC enabled." + f"Cloud DNS {managed_zone.name} doesn't have DNSSEC enabled." 
) findings.append(report) diff --git a/prowler/providers/gcp/services/dns/dns_service.py b/prowler/providers/gcp/services/dns/dns_service.py index ff504698..90629ca1 100644 --- a/prowler/providers/gcp/services/dns/dns_service.py +++ b/prowler/providers/gcp/services/dns/dns_service.py @@ -12,8 +12,11 @@ class DNS: self.project_ids = audit_info.project_ids self.default_project_id = audit_info.default_project_id self.client = generate_client(self.service, self.api_version, audit_info) + self.region = "global" self.managed_zones = [] self.__get_managed_zones__() + self.policies = [] + self.__get_policies__() def __get_managed_zones__(self): for project_id in self.project_ids: @@ -42,6 +45,35 @@ class DNS: f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" ) + def __get_policies__(self): + for project_id in self.project_ids: + try: + request = self.client.policies().list(project=project_id) + while request is not None: + response = request.execute() + + for policy in response.get("policies", []): + policy_networks = [] + for network in policy.get("networks", []): + policy_networks.append(network["networkUrl"].split("/")[-1]) + self.policies.append( + Policy( + name=policy["name"], + id=policy["id"], + logging=policy.get("enableLogging", False), + networks=policy_networks, + project_id=project_id, + ) + ) + + request = self.client.policies().list_next( + previous_request=request, previous_response=response + ) + except Exception as error: + logger.error( + f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + class ManagedZone(BaseModel): name: str @@ -49,3 +81,11 @@ class ManagedZone(BaseModel): dnssec: bool key_specs: list project_id: str + + +class Policy(BaseModel): + name: str + id: str + logging: bool + networks: list + project_id: str diff --git a/prowler/providers/gcp/services/iam/accessapproval_client.py b/prowler/providers/gcp/services/iam/accessapproval_client.py new file mode 100644 index 00000000..f4f201a9 --- /dev/null +++ b/prowler/providers/gcp/services/iam/accessapproval_client.py @@ -0,0 +1,4 @@ +from prowler.providers.gcp.lib.audit_info.audit_info import gcp_audit_info +from prowler.providers.gcp.services.iam.iam_service import AccessApproval + +accessapproval_client = AccessApproval(gcp_audit_info) diff --git a/prowler/providers/gcp/services/iam/essentialcontacts_client.py b/prowler/providers/gcp/services/iam/essentialcontacts_client.py new file mode 100644 index 00000000..9bb82ed8 --- /dev/null +++ b/prowler/providers/gcp/services/iam/essentialcontacts_client.py @@ -0,0 +1,4 @@ +from prowler.providers.gcp.lib.audit_info.audit_info import gcp_audit_info +from prowler.providers.gcp.services.iam.iam_service import EssentialContacts + +essentialcontacts_client = EssentialContacts(gcp_audit_info) diff --git a/prowler/providers/gcp/services/iam/iam_account_access_approval_enabled/__init__.py b/prowler/providers/gcp/services/iam/iam_account_access_approval_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/iam/iam_account_access_approval_enabled/iam_account_access_approval_enabled.metadata.json b/prowler/providers/gcp/services/iam/iam_account_access_approval_enabled/iam_account_access_approval_enabled.metadata.json new file mode 100644 index 00000000..cef77354 --- /dev/null +++ b/prowler/providers/gcp/services/iam/iam_account_access_approval_enabled/iam_account_access_approval_enabled.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": 
"iam_account_access_approval_enabled", + "CheckTitle": "Ensure Access Approval is Enabled in your account", + "CheckType": [], + "ServiceName": "iam", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "Account", + "Description": "Ensure that Access Approval is enabled within your Google Cloud Platform (GCP) account in order to allow you to require your explicit approval whenever Google personnel need to access your GCP projects. Once the Access Approval feature is enabled, you can delegate users within your organization who can approve the access requests by giving them a security role in Identity and Access Management (IAM). These requests show the requester name/ID in an email or Pub/Sub message that you can choose to approve. This creates a new control and logging layer that reveals who in your organization approved/denied access requests to your projects.", + "Risk": "Controlling access to your Google Cloud data is crucial when working with business-critical and sensitive data. With Access Approval, you can be certain that your cloud information is accessed by approved Google personnel only. The Access Approval feature ensures that a cryptographically-signed approval is available for Google Cloud support and engineering teams when they need to access your cloud data (certain exceptions apply). By default, Access Approval and its dependency of Access Transparency are not enabled.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/enable-access-approval.html", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure that Access Approval is enabled within your Google Cloud Platform (GCP) account in order to allow you to require your explicit approval whenever Google personnel need to access your GCP projects. Once the Access Approval feature is enabled, you can delegate users within your organization who can approve the access requests by giving them a security role in Identity and Access Management (IAM). These requests show the requester name/ID in an email or Pub/Sub message that you can choose to approve. 
This creates a new control and logging layer that reveals who in your organization approved/denied access requests to your projects.", + "Url": "https://cloud.google.com/cloud-provider-access-management/access-approval/docs" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/iam/iam_account_access_approval_enabled/iam_account_access_approval_enabled.py b/prowler/providers/gcp/services/iam/iam_account_access_approval_enabled/iam_account_access_approval_enabled.py new file mode 100644 index 00000000..fcf85c5a --- /dev/null +++ b/prowler/providers/gcp/services/iam/iam_account_access_approval_enabled/iam_account_access_approval_enabled.py @@ -0,0 +1,24 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.iam.accessapproval_client import ( + accessapproval_client, +) + + +class iam_account_access_approval_enabled(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for project_id in accessapproval_client.project_ids: + report = Check_Report_GCP(self.metadata()) + report.project_id = project_id + report.resource_id = project_id + report.location = accessapproval_client.region + report.status = "PASS" + report.status_extended = f"Project {project_id} has Access Approval enabled" + if project_id not in accessapproval_client.settings: + report.status = "FAIL" + report.status_extended = ( + f"Project {project_id} does not have Access Approval enabled" + ) + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/iam/iam_audit_logs_enabled/__init__.py b/prowler/providers/gcp/services/iam/iam_audit_logs_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/iam/iam_audit_logs_enabled/iam_audit_logs_enabled.metadata.json b/prowler/providers/gcp/services/iam/iam_audit_logs_enabled/iam_audit_logs_enabled.metadata.json new file mode 100644 index 00000000..a723c53a --- /dev/null +++ b/prowler/providers/gcp/services/iam/iam_audit_logs_enabled/iam_audit_logs_enabled.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "iam_audit_logs_enabled", + "CheckTitle": "Configure Google Cloud Audit Logs to Track All Activities", + "CheckType": [], + "ServiceName": "iam", + "SubServiceName": "Audit Logs", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "", + "Description": "Ensure that the Google Cloud Audit Logs feature is configured to track Data Access logs for all Google Cloud Platform (GCP) services and users, in order to enhance overall access security and meet compliance requirements. Once configured, the feature can record all admin-related activities, as well as all the read and write access requests to user data.", + "Risk": "In order to maintain an effective Google Cloud audit configuration for your project, folder, and organization, all 3 types of Data Access logs (ADMIN_READ, DATA_READ and DATA_WRITE) must be enabled for all supported GCP services. Also, Data Access logs should be captured for all IAM users, without exempting any of them. Exemptions let you control which users generate audit logs. When you add an exempted user to your log configuration, audit logs are not created for that user, for the selected log type(s).
Data Access audit logs are disabled by default and must be explicitly enabled based on your business requirements.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/record-all-activities.html", + "Terraform": "https://docs.bridgecrew.io/docs/ensure-that-cloud-audit-logging-is-configured-properly-across-all-services-and-all-users-from-a-project#terraform" + }, + "Recommendation": { + "Text": "It is recommended that Cloud Audit Logging is configured to track all admin activities and read/write access to user data.", + "Url": "https://cloud.google.com/logging/docs/audit/" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/iam/iam_audit_logs_enabled/iam_audit_logs_enabled.py b/prowler/providers/gcp/services/iam/iam_audit_logs_enabled/iam_audit_logs_enabled.py new file mode 100644 index 00000000..8df0edde --- /dev/null +++ b/prowler/providers/gcp/services/iam/iam_audit_logs_enabled/iam_audit_logs_enabled.py @@ -0,0 +1,24 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.cloudresourcemanager.cloudresourcemanager_client import ( + cloudresourcemanager_client, +) + + +class iam_audit_logs_enabled(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for project in cloudresourcemanager_client.projects: + report = Check_Report_GCP(self.metadata()) + report.project_id = project.id + report.location = cloudresourcemanager_client.region + report.resource_id = project.id + report.status = "PASS" + report.status_extended = f"Audit Logs are enabled for project {project.id}" + if not project.audit_logging: + report.status = "FAIL" + report.status_extended = ( + f"Audit Logs are not enabled for project {project.id}" + ) + findings.append(report) + + return findings
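The check above relies on a `project.audit_logging` attribute; the `cloudresourcemanager` service hunk that populates it is not included in this excerpt. A plausible derivation, sketched purely as an assumption (the real code may differ), is that a project counts as compliant when its IAM policy defines `auditConfigs`:

```python
# Hypothetical sketch: deriving audit_logging from the project's IAM policy.
# `client` stands for the cloudresourcemanager v1 client generated elsewhere
# in the service; the attribute name matches the check above.
policy = client.projects().getIamPolicy(resource=project_id, body={}).execute()
audit_logging = bool(policy.get("auditConfigs"))
```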
diff --git a/prowler/providers/gcp/services/iam/iam_no_service_roles_at_project_level/iam_no_service_roles_at_project_level.py b/prowler/providers/gcp/services/iam/iam_no_service_roles_at_project_level/iam_no_service_roles_at_project_level.py index ca35e6b5..06996302 100644 --- a/prowler/providers/gcp/services/iam/iam_no_service_roles_at_project_level/iam_no_service_roles_at_project_level.py +++ b/prowler/providers/gcp/services/iam/iam_no_service_roles_at_project_level/iam_no_service_roles_at_project_level.py @@ -13,6 +13,7 @@ class iam_no_service_roles_at_project_level(Check): report.project_id = binding.project_id report.resource_id = binding.role report.resource_name = binding.role + report.location = cloudresourcemanager_client.region if binding.role in [ "roles/iam.serviceAccountUser", "roles/iam.serviceAccountTokenCreator", diff --git a/prowler/providers/gcp/services/iam/iam_organization_essential_contacts_configured/__init__.py b/prowler/providers/gcp/services/iam/iam_organization_essential_contacts_configured/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/iam/iam_organization_essential_contacts_configured/iam_organization_essential_contacts_configured.metadata.json b/prowler/providers/gcp/services/iam/iam_organization_essential_contacts_configured/iam_organization_essential_contacts_configured.metadata.json new file mode 100644 index 00000000..18fc6f0b --- /dev/null +++ b/prowler/providers/gcp/services/iam/iam_organization_essential_contacts_configured/iam_organization_essential_contacts_configured.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "iam_organization_essential_contacts_configured", + "CheckTitle": "Ensure Essential Contacts is Configured for Organization", + "CheckType": [], + "ServiceName": "iam", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "Organization", + "Description": "It is recommended that Essential Contacts is configured to designate email addresses for Google Cloud services to notify of important technical or security information.", + "Risk": "Google Cloud Platform (GCP) services, such as Cloud Billing, send out billing notifications to share important information with the cloud platform users. By default, these types of notifications are sent to members with certain Identity and Access Management (IAM) roles such as 'roles/owner' and 'roles/billing.admin'. With Essential Contacts, you can specify exactly who receives important notifications by providing your own list of contacts (i.e. email addresses).", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "gcloud essential-contacts create --email= --notification-categories= --organization=", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/essential-contacts.html", + "Terraform": "" + }, + "Recommendation": { + "Text": "It is recommended that Essential Contacts is configured to designate email addresses for Google Cloud services to notify of important technical or security information.", + "Url": "https://cloud.google.com/resource-manager/docs/managing-notification-contacts" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/iam/iam_organization_essential_contacts_configured/iam_organization_essential_contacts_configured.py b/prowler/providers/gcp/services/iam/iam_organization_essential_contacts_configured/iam_organization_essential_contacts_configured.py new file mode 100644 index 00000000..b7cb1b6b --- /dev/null +++ b/prowler/providers/gcp/services/iam/iam_organization_essential_contacts_configured/iam_organization_essential_contacts_configured.py @@ -0,0 +1,27 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.iam.essentialcontacts_client import ( + essentialcontacts_client, +) + + +class iam_organization_essential_contacts_configured(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for org in essentialcontacts_client.organizations: + report = Check_Report_GCP(self.metadata()) + report.project_id = org.id + report.resource_id = org.id + report.resource_name = org.name + report.location = essentialcontacts_client.region + report.status = "FAIL" + report.status_extended = ( + f"Organization {org.name} does not have essential contacts configured" + ) + if org.contacts: + report.status = "PASS" + report.status_extended = ( + f"Organization {org.name} has essential contacts configured" + ) + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/__init__.py b/prowler/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/iam_role_kms_enforce_separation_of_duties.metadata.json b/prowler/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/iam_role_kms_enforce_separation_of_duties.metadata.json new file mode 100644
index 00000000..aa95d4f0 --- /dev/null +++ b/prowler/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/iam_role_kms_enforce_separation_of_duties.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "iam_role_kms_enforce_separation_of_duties", + "CheckTitle": "Enforce Separation of Duties for KMS-Related Roles", + "CheckType": [], + "ServiceName": "iam", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "IAMRole", + "Description": "Ensure that separation of duties is enforced for all Cloud Key Management Service (KMS) related roles. The principle of separation of duties (also known as segregation of duties) has as its primary objective the prevention of fraud and human error. This objective is achieved by dividing the tasks and the associated privileges for a specific business process among multiple users/identities. Google Cloud provides predefined roles that can be used to implement the principle of separation of duties, where it is needed. The predefined Cloud KMS Admin role is meant for users to manage KMS keys but not to use them. The Cloud KMS CryptoKey Encrypter/Decrypter roles are meant for identities that use keys to encrypt and decrypt data, but not to manage them. To adhere to cloud security best practices, your IAM users should not have the Admin role and any of the CryptoKey Encrypter/Decrypter roles assigned at the same time.", + "Risk": "The principle of separation of duties can be enforced in order to avoid a single IAM user/identity holding all the permissions needed to perform unwanted actions, such as using a cryptographic key to access and decrypt data which the user should not normally have access to.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/enforce-separation-of-duties-for-kms-related-roles.html", + "Terraform": "" + }, + "Recommendation": { + "Text": "It is recommended that the principle of 'Separation of Duties' is enforced while assigning KMS-related roles to users.", + "Url": "https://cloud.google.com/kms/docs/separation-of-duties" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/iam_role_kms_enforce_separation_of_duties.py b/prowler/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/iam_role_kms_enforce_separation_of_duties.py new file mode 100644 index 00000000..04e091a2 --- /dev/null +++ b/prowler/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/iam_role_kms_enforce_separation_of_duties.py @@ -0,0 +1,36 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.cloudresourcemanager.cloudresourcemanager_client import ( + cloudresourcemanager_client, +) + + +class iam_role_kms_enforce_separation_of_duties(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for project in cloudresourcemanager_client.project_ids: + non_compliant_members = [] + kms_admin_members = [] + report = Check_Report_GCP(self.metadata()) + report.project_id = project + report.location = cloudresourcemanager_client.region + report.resource_id = project + report.status = "PASS" + report.status_extended = f"Principle of separation of duties was enforced for KMS-Related Roles in project {project}" + for binding in cloudresourcemanager_client.bindings: + if binding.project_id == project and "roles/cloudkms.admin" in binding.role: + kms_admin_members.extend(binding.members) + for binding in cloudresourcemanager_client.bindings: + if binding.project_id == project and ( + "roles/cloudkms.cryptoKeyEncrypterDecrypter" in binding.role + or "roles/cloudkms.cryptoKeyEncrypter" in binding.role + or "roles/cloudkms.cryptoKeyDecrypter" in binding.role + ): + for member in binding.members: + if member in kms_admin_members: + non_compliant_members.append(member) + if non_compliant_members: + report.status = "FAIL" + report.status_extended = f"Principle of separation of duties was not enforced for KMS-Related Roles in project {project} for members {','.join(non_compliant_members)}" + findings.append(report) + + return findings
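The excerpt ships no unit test for this check. Following the conventions of the test files further down (a mocked client patched over the module-level singleton), a failing-case sketch might look like this; the attribute shape of the mocked bindings is an assumption based on how the check reads them:

```python
from re import search
from unittest import mock

GCP_PROJECT_ID = "123456789012"


class Test_iam_role_kms_enforce_separation_of_duties:
    def test_member_holding_admin_and_encrypter_roles(self):
        # Mocked bindings: project_id, role and members are the only
        # attributes the check reads.
        cloudresourcemanager_client = mock.MagicMock
        cloudresourcemanager_client.project_ids = [GCP_PROJECT_ID]
        cloudresourcemanager_client.region = "global"
        cloudresourcemanager_client.bindings = [
            mock.MagicMock(
                project_id=GCP_PROJECT_ID,
                role="roles/cloudkms.admin",
                members=["user:alice@example.com"],
            ),
            mock.MagicMock(
                project_id=GCP_PROJECT_ID,
                role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
                members=["user:alice@example.com", "user:bob@example.com"],
            ),
        ]

        with mock.patch(
            "prowler.providers.gcp.services.iam.iam_role_kms_enforce_separation_of_duties.iam_role_kms_enforce_separation_of_duties.cloudresourcemanager_client",
            new=cloudresourcemanager_client,
        ):
            from prowler.providers.gcp.services.iam.iam_role_kms_enforce_separation_of_duties.iam_role_kms_enforce_separation_of_duties import (
                iam_role_kms_enforce_separation_of_duties,
            )

            check = iam_role_kms_enforce_separation_of_duties()
            result = check.execute()

            # alice holds both role families; bob only encrypts/decrypts.
            assert len(result) == 1
            assert result[0].status == "FAIL"
            assert search("user:alice@example.com", result[0].status_extended)
```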
diff --git a/prowler/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/__init__.py b/prowler/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/iam_role_sa_enforce_separation_of_duties.metadata.json b/prowler/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/iam_role_sa_enforce_separation_of_duties.metadata.json new file mode 100644 index 00000000..925d0863 --- /dev/null +++ b/prowler/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/iam_role_sa_enforce_separation_of_duties.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "iam_role_sa_enforce_separation_of_duties", + "CheckTitle": "Enforce Separation of Duties for Service-Account Related Roles", + "CheckType": [], + "ServiceName": "iam", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "medium", + "ResourceType": "IAMRole", + "Description": "Ensure that separation of duties (also known as segregation of duties - SoD) is enforced for all Google Cloud Platform (GCP) service-account related roles. The security principle of separation of duties has as its primary objective the prevention of fraud and human error. This objective is achieved by dividing the tasks and associated privileges for a specific business process among multiple users/members. To follow security best practices, your GCP service accounts should not have the Service Account Admin and Service Account User roles assigned at the same time.", + "Risk": "The principle of separation of duties should be enforced in order to avoid highly privileged IAM members, as the permissions granted to these members can allow them to perform malicious or unwanted actions.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudIAM/enforce-separation-of-duties-for-service-account-roles.html", + "Terraform": "https://docs.bridgecrew.io/docs/bc_gcp_iam_10#terraform" + }, + "Recommendation": { + "Text": "Ensure that separation of duties (also known as segregation of duties - SoD) is enforced for all Google Cloud Platform (GCP) service-account related roles. The security principle of separation of duties has as its primary objective the prevention of fraud and human error. This objective is achieved by dividing the tasks and associated privileges for a specific business process among multiple users/members.
To follow security best practices, your GCP service accounts should not have the Service Account Admin and Service Account User roles assigned at the same time.", + "Url": "https://cloud.google.com/iam/docs/understanding-roles" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/iam_role_sa_enforce_separation_of_duties.py b/prowler/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/iam_role_sa_enforce_separation_of_duties.py new file mode 100644 index 00000000..95dc62a3 --- /dev/null +++ b/prowler/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/iam_role_sa_enforce_separation_of_duties.py @@ -0,0 +1,32 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.cloudresourcemanager.cloudresourcemanager_client import ( + cloudresourcemanager_client, +) + + +class iam_role_sa_enforce_separation_of_duties(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for project in cloudresourcemanager_client.project_ids: + sa_admin_members = [] + non_compliant_members = [] + report = Check_Report_GCP(self.metadata()) + report.project_id = project + report.location = cloudresourcemanager_client.region + report.resource_id = project + report.status = "PASS" + report.status_extended = f"Principle of separation of duties was enforced for Service-Account Related Roles in project {project}" + for binding in cloudresourcemanager_client.bindings: + if binding.project_id == project and "roles/iam.serviceAccountAdmin" in binding.role: + sa_admin_members.extend(binding.members) + for binding in cloudresourcemanager_client.bindings: + if binding.project_id == project and "roles/iam.serviceAccountUser" in binding.role: + for member in binding.members: + if member in sa_admin_members: + non_compliant_members.append(member) + if non_compliant_members: + report.status = "FAIL" + report.status_extended = f"Principle of separation of duties was not enforced for Service-Account Related Roles in project {project} for members {','.join(non_compliant_members)}" + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/iam/iam_service.py b/prowler/providers/gcp/services/iam/iam_service.py index c843b7e5..1540dd29 100644 --- a/prowler/providers/gcp/services/iam/iam_service.py +++ b/prowler/providers/gcp/services/iam/iam_service.py @@ -4,6 +4,9 @@ from pydantic import BaseModel from prowler.lib.logger import logger from prowler.providers.gcp.gcp_provider import generate_client +from prowler.providers.gcp.services.cloudresourcemanager.cloudresourcemanager_client import ( + cloudresourcemanager_client, +) ################## IAM @@ -103,3 +106,84 @@ class ServiceAccount(BaseModel): display_name: str keys: list[Key] = [] project_id: str + + +################## AccessApproval +class AccessApproval: + def __init__(self, audit_info): + self.service = "accessapproval" + self.api_version = "v1" + self.project_ids = audit_info.project_ids + self.region = "global" + self.client = generate_client(self.service, self.api_version, audit_info) + self.settings = {} + self.__get_settings__() + + def __get_client__(self): + return self.client + + def __get_settings__(self): + for project_id in self.project_ids: + try: + response = ( + self.client.projects().getAccessApprovalSettings( + name=f"projects/{project_id}/accessApprovalSettings" + ) + ).execute() + self.settings[project_id] = Setting( + name=response["name"], + project_id=project_id, + ) + except Exception as error: + logger.error( + f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + + +class Setting(BaseModel): + name: str + project_id: str
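As with the KMS check, no test for `iam_account_access_approval_enabled` appears in this excerpt; a minimal failing-case sketch in the style of the tests below:

```python
from re import search
from unittest import mock

GCP_PROJECT_ID = "123456789012"


class Test_iam_account_access_approval_enabled:
    def test_access_approval_not_enabled(self):
        # An empty settings dict means no project has Access Approval -> FAIL.
        accessapproval_client = mock.MagicMock
        accessapproval_client.project_ids = [GCP_PROJECT_ID]
        accessapproval_client.region = "global"
        accessapproval_client.settings = {}

        with mock.patch(
            "prowler.providers.gcp.services.iam.iam_account_access_approval_enabled.iam_account_access_approval_enabled.accessapproval_client",
            new=accessapproval_client,
        ):
            from prowler.providers.gcp.services.iam.iam_account_access_approval_enabled.iam_account_access_approval_enabled import (
                iam_account_access_approval_enabled,
            )

            check = iam_account_access_approval_enabled()
            result = check.execute()

            assert len(result) == 1
            assert result[0].status == "FAIL"
            assert search(
                f"Project {GCP_PROJECT_ID} does not have Access Approval enabled",
                result[0].status_extended,
            )
```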
+################## EssentialContacts +class EssentialContacts: + def __init__(self, audit_info): + self.service = "essentialcontacts" + self.api_version = "v1" + self.region = "global" + self.client = generate_client(self.service, self.api_version, audit_info) + self.organizations = [] + self.__get_contacts__() + + def __get_client__(self): + return self.client + + def __get_contacts__(self): + for org in cloudresourcemanager_client.organizations: + try: + contacts = False + response = ( + self.client.organizations() + .contacts() + .list(parent="organizations/" + org.id) + ).execute() + if len(response.get("contacts", [])) > 0: + contacts = True + + self.organizations.append( + Organization( + name=org.name, + id=org.id, + contacts=contacts, + ) + ) + except Exception as error: + logger.error( + f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + + +class Organization(BaseModel): + name: str + id: str + contacts: bool diff --git a/prowler/providers/gcp/services/serviceusage/__init__.py b/prowler/providers/gcp/services/serviceusage/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/serviceusage/serviceusage_client.py b/prowler/providers/gcp/services/serviceusage/serviceusage_client.py new file mode 100644 index 00000000..363dcdf9 --- /dev/null +++ b/prowler/providers/gcp/services/serviceusage/serviceusage_client.py @@ -0,0 +1,6 @@ +from prowler.providers.gcp.lib.audit_info.audit_info import gcp_audit_info +from prowler.providers.gcp.services.serviceusage.serviceusage_service import ( + ServiceUsage, +) + +serviceusage_client = ServiceUsage(gcp_audit_info) diff --git a/prowler/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/__init__.py b/prowler/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/prowler/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/serviceusage_cloudasset_inventory_enabled.metadata.json b/prowler/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/serviceusage_cloudasset_inventory_enabled.metadata.json new file mode 100644 index 00000000..2eef4104 --- /dev/null +++ b/prowler/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/serviceusage_cloudasset_inventory_enabled.metadata.json @@ -0,0 +1,30 @@ +{ + "Provider": "gcp", + "CheckID": "serviceusage_cloudasset_inventory_enabled", + "CheckTitle": "Ensure Cloud Asset Inventory Is Enabled", + "CheckType": [], + "ServiceName": "serviceusage", + "SubServiceName": "", + "ResourceIdTemplate": "", + "Severity": "high", + "ResourceType": "Service", + "Description": "GCP Cloud Asset Inventory is a service that provides a historical view of GCP resources and IAM policies through a time-series database. The information recorded includes metadata on Google Cloud resources, metadata on policies set on Google Cloud projects or resources, and runtime information gathered within a Google Cloud resource.", + "Risk": "Gaining insight into Google Cloud resources and policies is vital for tasks such as DevOps, security analytics, multi-cluster and fleet management, auditing, and governance.
With Cloud Asset Inventory you can discover, monitor, and analyze all GCP assets in one place, achieving a better understanding of all your cloud assets across projects and services.", + "RelatedUrl": "", + "Remediation": { + "Code": { + "CLI": "gcloud services enable cloudasset.googleapis.com", + "NativeIaC": "", + "Other": "https://www.trendmicro.com/cloudoneconformity/knowledge-base/gcp/CloudAPI/enabled-cloud-asset-inventory.html", + "Terraform": "" + }, + "Recommendation": { + "Text": "Ensure that Cloud Asset Inventory is enabled for all your GCP projects in order to efficiently manage the history and the inventory of your cloud resources. Google Cloud Asset Inventory is a fully managed metadata inventory service that allows you to view, monitor, analyze, and gain insights for your Google Cloud and Anthos assets. Cloud Asset Inventory is disabled by default in each GCP project.", + "Url": "https://cloud.google.com/asset-inventory/docs" + } + }, + "Categories": [], + "DependsOn": [], + "RelatedTo": [], + "Notes": "" +} diff --git a/prowler/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/serviceusage_cloudasset_inventory_enabled.py b/prowler/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/serviceusage_cloudasset_inventory_enabled.py new file mode 100644 index 00000000..47dd348d --- /dev/null +++ b/prowler/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/serviceusage_cloudasset_inventory_enabled.py @@ -0,0 +1,31 @@ +from prowler.lib.check.models import Check, Check_Report_GCP +from prowler.providers.gcp.services.serviceusage.serviceusage_client import ( + serviceusage_client, +) + + +class serviceusage_cloudasset_inventory_enabled(Check): + def execute(self) -> Check_Report_GCP: + findings = [] + for project_id in serviceusage_client.project_ids: + report = Check_Report_GCP(self.metadata()) + report.project_id = project_id + report.resource_id = "cloudasset.googleapis.com" + report.resource_name = "Cloud Asset Inventory" + report.location = serviceusage_client.region + report.status = "FAIL" + report.status_extended = ( + f"Cloud Asset Inventory is not enabled in project {project_id}" + ) + for active_service in serviceusage_client.active_services.get( + project_id, [] + ): + if active_service.name == "cloudasset.googleapis.com": + report.status = "PASS" + report.status_extended = ( + f"Cloud Asset Inventory is enabled in project {project_id}" + ) + break + findings.append(report) + + return findings diff --git a/prowler/providers/gcp/services/serviceusage/serviceusage_service.py b/prowler/providers/gcp/services/serviceusage/serviceusage_service.py new file mode 100644 index 00000000..4c208861 --- /dev/null +++ b/prowler/providers/gcp/services/serviceusage/serviceusage_service.py @@ -0,0 +1,51 @@ +from pydantic import BaseModel + +from prowler.lib.logger import logger +from prowler.providers.gcp.gcp_provider import generate_client + + +################## ServiceUsage +class ServiceUsage: + def __init__(self, audit_info): + self.service = "serviceusage" + self.api_version = "v1" + self.region = "global" + self.project_ids = audit_info.project_ids + self.client = generate_client(self.service, self.api_version, audit_info) + self.active_services = {} + self.__get_active_services__() + + def __get_client__(self): + return self.client + + def __get_active_services__(self): + for project_id in self.project_ids: + self.active_services[project_id] = [] + try: + request = self.client.services().list( + 
parent="projects/" + project_id, filter="state:ENABLED" + ) + while request is not None: + response = request.execute() + for service in response["services"]: + self.active_services[project_id].append( + Service( + name=service["name"].split("/")[-1], + title=service["config"]["title"], + project_id=project_id, + ) + ) + + request = self.client.services().list_next( + previous_request=request, previous_response=response + ) + except Exception as error: + logger.error( + f"{self.region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) + + +class Service(BaseModel): + name: str + title: str + project_id: str diff --git a/tests/providers/gcp/services/apikeys/apikeys_api_restrictions_configured/apikeys_api_restrictions_configured_test.py b/tests/providers/gcp/services/apikeys/apikeys_api_restrictions_configured/apikeys_api_restrictions_configured_test.py index faf2b607..c1145ec0 100644 --- a/tests/providers/gcp/services/apikeys/apikeys_api_restrictions_configured/apikeys_api_restrictions_configured_test.py +++ b/tests/providers/gcp/services/apikeys/apikeys_api_restrictions_configured/apikeys_api_restrictions_configured_test.py @@ -40,6 +40,7 @@ class Test_apikeys_api_restrictions_configured: apikeys_client = mock.MagicMock apikeys_client.project_ids = [GCP_PROJECT_ID] apikeys_client.keys = [key] + apikeys_client.region = "global" with mock.patch( "prowler.providers.gcp.services.apikeys.apikeys_api_restrictions_configured.apikeys_api_restrictions_configured.apikeys_client", @@ -55,7 +56,7 @@ class Test_apikeys_api_restrictions_configured: assert len(result) == 1 assert result[0].status == "PASS" assert search( - f"API key {key.name} have restrictions configured.", + f"API key {key.name} has restrictions configured.", result[0].status_extended, ) assert result[0].resource_id == key.id @@ -74,6 +75,7 @@ class Test_apikeys_api_restrictions_configured: apikeys_client = mock.MagicMock apikeys_client.project_ids = [GCP_PROJECT_ID] apikeys_client.keys = [key] + apikeys_client.region = "global" with mock.patch( "prowler.providers.gcp.services.apikeys.apikeys_api_restrictions_configured.apikeys_api_restrictions_configured.apikeys_client", @@ -114,6 +116,7 @@ class Test_apikeys_api_restrictions_configured: apikeys_client = mock.MagicMock apikeys_client.project_ids = [GCP_PROJECT_ID] apikeys_client.keys = [key] + apikeys_client.region = "global" with mock.patch( "prowler.providers.gcp.services.apikeys.apikeys_api_restrictions_configured.apikeys_api_restrictions_configured.apikeys_client", diff --git a/tests/providers/gcp/services/apikeys/apikeys_key_exists/apikeys_key_exists_test.py b/tests/providers/gcp/services/apikeys/apikeys_key_exists/apikeys_key_exists_test.py new file mode 100644 index 00000000..9160caf7 --- /dev/null +++ b/tests/providers/gcp/services/apikeys/apikeys_key_exists/apikeys_key_exists_test.py @@ -0,0 +1,65 @@ +from re import search +from unittest import mock + +GCP_PROJECT_ID = "123456789012" + + +class Test_apikeys_key_exists: + def test_apikeys_no_keys(self): + apikeys_client = mock.MagicMock + apikeys_client.project_ids = [GCP_PROJECT_ID] + apikeys_client.keys = [] + apikeys_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.apikeys.apikeys_key_exists.apikeys_key_exists.apikeys_client", + new=apikeys_client, + ): + from prowler.providers.gcp.services.apikeys.apikeys_key_exists.apikeys_key_exists import ( + apikeys_key_exists, + ) + + check = apikeys_key_exists() + result = check.execute() + assert len(result) == 1 + assert 
result[0].status == "PASS" + assert search( + f"Project {GCP_PROJECT_ID} does not have active API Keys.", + result[0].status_extended, + ) + assert result[0].resource_id == GCP_PROJECT_ID + + def test_one_compliant_key(self): + from prowler.providers.gcp.services.apikeys.apikeys_service import Key + + key = Key( + name="test", + id="123", + creation_time="2023-06-01T11:21:41.627509Z", + restrictions={}, + project_id=GCP_PROJECT_ID, + ) + + apikeys_client = mock.MagicMock + apikeys_client.project_ids = [GCP_PROJECT_ID] + apikeys_client.keys = [key] + apikeys_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.apikeys.apikeys_key_exists.apikeys_key_exists.apikeys_client", + new=apikeys_client, + ): + from prowler.providers.gcp.services.apikeys.apikeys_key_exists.apikeys_key_exists import ( + apikeys_key_exists, + ) + + check = apikeys_key_exists() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + f"Project {GCP_PROJECT_ID} has active API Keys.", + result[0].status_extended, + ) + assert result[0].resource_id == GCP_PROJECT_ID diff --git a/tests/providers/gcp/services/apikeys/apikeys_key_rotated_in_90_days/apikeys_key_rotated_in_90_days_test.py b/tests/providers/gcp/services/apikeys/apikeys_key_rotated_in_90_days/apikeys_key_rotated_in_90_days_test.py index 3292fbd4..474f7fbd 100644 --- a/tests/providers/gcp/services/apikeys/apikeys_key_rotated_in_90_days/apikeys_key_rotated_in_90_days_test.py +++ b/tests/providers/gcp/services/apikeys/apikeys_key_rotated_in_90_days/apikeys_key_rotated_in_90_days_test.py @@ -35,6 +35,7 @@ class Test_apikeys_key_rotated_in_90_days: apikeys_client = mock.MagicMock apikeys_client.project_ids = [GCP_PROJECT_ID] apikeys_client.keys = [key] + apikeys_client.region = "global" with mock.patch( "prowler.providers.gcp.services.apikeys.apikeys_key_rotated_in_90_days.apikeys_key_rotated_in_90_days.apikeys_client", @@ -69,6 +70,7 @@ class Test_apikeys_key_rotated_in_90_days: apikeys_client = mock.MagicMock apikeys_client.project_ids = [GCP_PROJECT_ID] apikeys_client.keys = [key] + apikeys_client.region = "global" with mock.patch( "prowler.providers.gcp.services.apikeys.apikeys_key_rotated_in_90_days.apikeys_key_rotated_in_90_days.apikeys_client", @@ -84,7 +86,7 @@ class Test_apikeys_key_rotated_in_90_days: assert len(result) == 1 assert result[0].status == "FAIL" assert search( - f"API key {key.name} creation date have more than 90 days.", + f"API key {key.name} creation date has more than 90 days.", result[0].status_extended, ) assert result[0].resource_id == key.id diff --git a/tests/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/compute_block_project_wide_ssh_keys_disabled_test.py b/tests/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/compute_block_project_wide_ssh_keys_disabled_test.py index 2d46dab5..8b699187 100644 --- a/tests/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/compute_block_project_wide_ssh_keys_disabled_test.py +++ b/tests/providers/gcp/services/compute/compute_block_project_wide_ssh_keys_disabled/compute_block_project_wide_ssh_keys_disabled_test.py @@ -4,21 +4,21 @@ from unittest import mock GCP_PROJECT_ID = "123456789012" -class Test_compute_block_project_wide_ssh_keys_disabled: +class Test_compute_instance_block_project_wide_ssh_keys_disabled: def test_compute_no_instances(self): compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] 
compute_client.instances = [] with mock.patch( - "prowler.providers.gcp.services.compute.compute_block_project_wide_ssh_keys_disabled.compute_block_project_wide_ssh_keys_disabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_block_project_wide_ssh_keys_disabled.compute_block_project_wide_ssh_keys_disabled import ( - compute_block_project_wide_ssh_keys_disabled, + from prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled import ( + compute_instance_block_project_wide_ssh_keys_disabled, ) - check = compute_block_project_wide_ssh_keys_disabled() + check = compute_instance_block_project_wide_ssh_keys_disabled() result = check.execute() assert len(result) == 0 @@ -33,6 +33,7 @@ class Test_compute_block_project_wide_ssh_keys_disabled: metadata={"items": [{"key": "block-project-ssh-keys", "value": "true"}]}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -44,14 +45,14 @@ class Test_compute_block_project_wide_ssh_keys_disabled: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_block_project_wide_ssh_keys_disabled.compute_block_project_wide_ssh_keys_disabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_block_project_wide_ssh_keys_disabled.compute_block_project_wide_ssh_keys_disabled import ( - compute_block_project_wide_ssh_keys_disabled, + from prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled import ( + compute_instance_block_project_wide_ssh_keys_disabled, ) - check = compute_block_project_wide_ssh_keys_disabled() + check = compute_instance_block_project_wide_ssh_keys_disabled() result = check.execute() assert len(result) == 1 @@ -73,6 +74,7 @@ class Test_compute_block_project_wide_ssh_keys_disabled: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -84,14 +86,14 @@ class Test_compute_block_project_wide_ssh_keys_disabled: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_block_project_wide_ssh_keys_disabled.compute_block_project_wide_ssh_keys_disabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_block_project_wide_ssh_keys_disabled.compute_block_project_wide_ssh_keys_disabled import ( - compute_block_project_wide_ssh_keys_disabled, + from prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled import ( + compute_instance_block_project_wide_ssh_keys_disabled, ) - 
check = compute_block_project_wide_ssh_keys_disabled() + check = compute_instance_block_project_wide_ssh_keys_disabled() result = check.execute() assert len(result) == 1 @@ -113,6 +115,7 @@ class Test_compute_block_project_wide_ssh_keys_disabled: metadata={"items": [{"key": "block-project-ssh-keys", "value": "false"}]}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -124,14 +127,14 @@ class Test_compute_block_project_wide_ssh_keys_disabled: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_block_project_wide_ssh_keys_disabled.compute_block_project_wide_ssh_keys_disabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_block_project_wide_ssh_keys_disabled.compute_block_project_wide_ssh_keys_disabled import ( - compute_block_project_wide_ssh_keys_disabled, + from prowler.providers.gcp.services.compute.compute_instance_block_project_wide_ssh_keys_disabled.compute_instance_block_project_wide_ssh_keys_disabled import ( + compute_instance_block_project_wide_ssh_keys_disabled, ) - check = compute_block_project_wide_ssh_keys_disabled() + check = compute_instance_block_project_wide_ssh_keys_disabled() result = check.execute() assert len(result) == 1 diff --git a/tests/providers/gcp/services/compute/compute_default_service_account_in_use/compute_default_service_account_in_use_test.py b/tests/providers/gcp/services/compute/compute_default_service_account_in_use/compute_default_service_account_in_use_test.py index 0311a284..2e63b975 100644 --- a/tests/providers/gcp/services/compute/compute_default_service_account_in_use/compute_default_service_account_in_use_test.py +++ b/tests/providers/gcp/services/compute/compute_default_service_account_in_use/compute_default_service_account_in_use_test.py @@ -4,20 +4,20 @@ from unittest import mock GCP_PROJECT_ID = "123456789012" -class Test_compute_default_service_account_in_use: +class Test_compute_instance_default_service_account_in_use: def test_compute_no_instances(self): compute_client = mock.MagicMock compute_client.instances = [] with mock.patch( - "prowler.providers.gcp.services.compute.compute_default_service_account_in_use.compute_default_service_account_in_use.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use.compute_instance_default_service_account_in_use.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_default_service_account_in_use.compute_default_service_account_in_use import ( - compute_default_service_account_in_use, + from prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use.compute_instance_default_service_account_in_use import ( + compute_instance_default_service_account_in_use, ) - check = compute_default_service_account_in_use() + check = compute_instance_default_service_account_in_use() result = check.execute() assert len(result) == 0 @@ -32,6 +32,7 @@ class Test_compute_default_service_account_in_use: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], 
service_accounts=[{"email": "custom@developer.gserviceaccount.com"}], @@ -43,14 +44,14 @@ class Test_compute_default_service_account_in_use: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_default_service_account_in_use.compute_default_service_account_in_use.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use.compute_instance_default_service_account_in_use.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_default_service_account_in_use.compute_default_service_account_in_use import ( - compute_default_service_account_in_use, + from prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use.compute_instance_default_service_account_in_use import ( + compute_instance_default_service_account_in_use, ) - check = compute_default_service_account_in_use() + check = compute_instance_default_service_account_in_use() result = check.execute() assert len(result) == 1 @@ -72,6 +73,7 @@ class Test_compute_default_service_account_in_use: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[ {"email": f"{GCP_PROJECT_ID}-compute@developer.gserviceaccount.com"} ], @@ -85,14 +87,14 @@ class Test_compute_default_service_account_in_use: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_default_service_account_in_use.compute_default_service_account_in_use.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use.compute_instance_default_service_account_in_use.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_default_service_account_in_use.compute_default_service_account_in_use import ( - compute_default_service_account_in_use, + from prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use.compute_instance_default_service_account_in_use import ( + compute_instance_default_service_account_in_use, ) - check = compute_default_service_account_in_use() + check = compute_instance_default_service_account_in_use() result = check.execute() assert len(result) == 1 @@ -114,6 +116,7 @@ class Test_compute_default_service_account_in_use: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[ {"email": f"{GCP_PROJECT_ID}-compute@developer.gserviceaccount.com"} ], @@ -127,14 +130,14 @@ class Test_compute_default_service_account_in_use: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_default_service_account_in_use.compute_default_service_account_in_use.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use.compute_instance_default_service_account_in_use.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_default_service_account_in_use.compute_default_service_account_in_use import ( - compute_default_service_account_in_use, + from prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use.compute_instance_default_service_account_in_use import ( + compute_instance_default_service_account_in_use, ) - check = compute_default_service_account_in_use() + check = compute_instance_default_service_account_in_use() result = check.execute() assert 
len(result) == 1 diff --git a/tests/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/compute_default_service_account_in_use_with_full_api_access_test.py b/tests/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/compute_default_service_account_in_use_with_full_api_access_test.py index 28d9d8f7..d665c10a 100644 --- a/tests/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/compute_default_service_account_in_use_with_full_api_access_test.py +++ b/tests/providers/gcp/services/compute/compute_default_service_account_in_use_with_full_api_access/compute_default_service_account_in_use_with_full_api_access_test.py @@ -4,20 +4,22 @@ from unittest import mock GCP_PROJECT_ID = "123456789012" -class Test_compute_default_service_account_in_use_with_full_api_access: +class Test_compute_instance_default_service_account_in_use_with_full_api_access: def test_compute_no_instances(self): compute_client = mock.MagicMock compute_client.instances = [] with mock.patch( - "prowler.providers.gcp.services.compute.compute_default_service_account_in_use_with_full_api_access.compute_default_service_account_in_use_with_full_api_access.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use_with_full_api_access.compute_instance_default_service_account_in_use_with_full_api_access.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_default_service_account_in_use_with_full_api_access.compute_default_service_account_in_use_with_full_api_access import ( - compute_default_service_account_in_use_with_full_api_access, + from prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use_with_full_api_access.compute_instance_default_service_account_in_use_with_full_api_access import ( + compute_instance_default_service_account_in_use_with_full_api_access, ) - check = compute_default_service_account_in_use_with_full_api_access() + check = ( + compute_instance_default_service_account_in_use_with_full_api_access() + ) result = check.execute() assert len(result) == 0 @@ -32,6 +34,7 @@ class Test_compute_default_service_account_in_use_with_full_api_access: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[ {"email": "123-compute@developer.gserviceaccount.com", "scopes": []} ], @@ -45,14 +48,16 @@ class Test_compute_default_service_account_in_use_with_full_api_access: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_default_service_account_in_use_with_full_api_access.compute_default_service_account_in_use_with_full_api_access.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use_with_full_api_access.compute_instance_default_service_account_in_use_with_full_api_access.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_default_service_account_in_use_with_full_api_access.compute_default_service_account_in_use_with_full_api_access import ( - compute_default_service_account_in_use_with_full_api_access, + from prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use_with_full_api_access.compute_instance_default_service_account_in_use_with_full_api_access import ( + compute_instance_default_service_account_in_use_with_full_api_access, ) - check = 
compute_default_service_account_in_use_with_full_api_access() + check = ( + compute_instance_default_service_account_in_use_with_full_api_access() + ) result = check.execute() assert len(result) == 1 @@ -74,6 +79,7 @@ class Test_compute_default_service_account_in_use_with_full_api_access: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[ { "email": f"{GCP_PROJECT_ID}-compute@developer.gserviceaccount.com", @@ -90,14 +96,16 @@ class Test_compute_default_service_account_in_use_with_full_api_access: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_default_service_account_in_use_with_full_api_access.compute_default_service_account_in_use_with_full_api_access.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use_with_full_api_access.compute_instance_default_service_account_in_use_with_full_api_access.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_default_service_account_in_use_with_full_api_access.compute_default_service_account_in_use_with_full_api_access import ( - compute_default_service_account_in_use_with_full_api_access, + from prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use_with_full_api_access.compute_instance_default_service_account_in_use_with_full_api_access import ( + compute_instance_default_service_account_in_use_with_full_api_access, ) - check = compute_default_service_account_in_use_with_full_api_access() + check = ( + compute_instance_default_service_account_in_use_with_full_api_access() + ) result = check.execute() assert len(result) == 1 @@ -119,6 +127,7 @@ class Test_compute_default_service_account_in_use_with_full_api_access: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[ { "email": f"{GCP_PROJECT_ID}-compute@developer.gserviceaccount.com", @@ -135,14 +144,16 @@ class Test_compute_default_service_account_in_use_with_full_api_access: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_default_service_account_in_use_with_full_api_access.compute_default_service_account_in_use_with_full_api_access.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use_with_full_api_access.compute_instance_default_service_account_in_use_with_full_api_access.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_default_service_account_in_use_with_full_api_access.compute_default_service_account_in_use_with_full_api_access import ( - compute_default_service_account_in_use_with_full_api_access, + from prowler.providers.gcp.services.compute.compute_instance_default_service_account_in_use_with_full_api_access.compute_instance_default_service_account_in_use_with_full_api_access import ( + compute_instance_default_service_account_in_use_with_full_api_access, ) - check = compute_default_service_account_in_use_with_full_api_access() + check = ( + compute_instance_default_service_account_in_use_with_full_api_access() + ) result = check.execute() assert len(result) == 1 diff --git a/tests/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/compute_encryption_with_csek_is_disabled_test.py 
b/tests/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/compute_encryption_with_csek_is_disabled_test.py index 8d266ac6..4d24f2d7 100644 --- a/tests/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/compute_encryption_with_csek_is_disabled_test.py +++ b/tests/providers/gcp/services/compute/compute_encryption_with_csek_is_disabled/compute_encryption_with_csek_is_disabled_test.py @@ -4,21 +4,21 @@ from unittest import mock GCP_PROJECT_ID = "123456789012" -class Test_compute_encryption_with_csek_is_disabled: +class Test_compute_instance_encryption_with_csek_enabled: def test_compute_no_instances(self): compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.instances = [] with mock.patch( - "prowler.providers.gcp.services.compute.compute_encryption_with_csek_is_disabled.compute_encryption_with_csek_is_disabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_encryption_with_csek_enabled.compute_instance_encryption_with_csek_enabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_encryption_with_csek_is_disabled.compute_encryption_with_csek_is_disabled import ( - compute_encryption_with_csek_is_disabled, + from prowler.providers.gcp.services.compute.compute_instance_encryption_with_csek_enabled.compute_instance_encryption_with_csek_enabled import ( + compute_instance_encryption_with_csek_enabled, ) - check = compute_encryption_with_csek_is_disabled() + check = compute_instance_encryption_with_csek_enabled() result = check.execute() assert len(result) == 0 @@ -33,6 +33,7 @@ class Test_compute_encryption_with_csek_is_disabled: metadata={"items": [{"key": "block-project-ssh-keys", "value": "true"}]}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", True), ("disk2", True)], @@ -44,20 +45,20 @@ class Test_compute_encryption_with_csek_is_disabled: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_encryption_with_csek_is_disabled.compute_encryption_with_csek_is_disabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_encryption_with_csek_enabled.compute_instance_encryption_with_csek_enabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_encryption_with_csek_is_disabled.compute_encryption_with_csek_is_disabled import ( - compute_encryption_with_csek_is_disabled, + from prowler.providers.gcp.services.compute.compute_instance_encryption_with_csek_enabled.compute_instance_encryption_with_csek_enabled import ( + compute_instance_encryption_with_csek_enabled, ) - check = compute_encryption_with_csek_is_disabled() + check = compute_instance_encryption_with_csek_enabled() result = check.execute() assert len(result) == 1 assert result[0].status == "PASS" assert search( - f"The VM Instance {instance.name} have every disk encrypted.", + f"The VM Instance {instance.name} has every disk encrypted.", result[0].status_extended, ) assert result[0].resource_id == instance.id @@ -73,6 +74,7 @@ class Test_compute_encryption_with_csek_is_disabled: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", True)], @@ -84,20 +86,20 @@ class 
Test_compute_encryption_with_csek_is_disabled: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_encryption_with_csek_is_disabled.compute_encryption_with_csek_is_disabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_encryption_with_csek_enabled.compute_instance_encryption_with_csek_enabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_encryption_with_csek_is_disabled.compute_encryption_with_csek_is_disabled import ( - compute_encryption_with_csek_is_disabled, + from prowler.providers.gcp.services.compute.compute_instance_encryption_with_csek_enabled.compute_instance_encryption_with_csek_enabled import ( + compute_instance_encryption_with_csek_enabled, ) - check = compute_encryption_with_csek_is_disabled() + check = compute_instance_encryption_with_csek_enabled() result = check.execute() assert len(result) == 1 assert result[0].status == "FAIL" assert search( - f"The VM Instance {instance.name} have the following unencrypted disks: '{', '.join([i[0] for i in instance.disks_encryption if not i[1]])}'", + f"The VM Instance {instance.name} has the following unencrypted disks: '{', '.join([i[0] for i in instance.disks_encryption if not i[1]])}'", result[0].status_extended, ) assert result[0].resource_id == instance.id @@ -113,6 +115,7 @@ class Test_compute_encryption_with_csek_is_disabled: metadata={"items": [{"key": "block-project-ssh-keys", "value": "false"}]}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -124,20 +127,20 @@ class Test_compute_encryption_with_csek_is_disabled: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_encryption_with_csek_is_disabled.compute_encryption_with_csek_is_disabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_encryption_with_csek_enabled.compute_instance_encryption_with_csek_enabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_encryption_with_csek_is_disabled.compute_encryption_with_csek_is_disabled import ( - compute_encryption_with_csek_is_disabled, + from prowler.providers.gcp.services.compute.compute_instance_encryption_with_csek_enabled.compute_instance_encryption_with_csek_enabled import ( + compute_instance_encryption_with_csek_enabled, ) - check = compute_encryption_with_csek_is_disabled() + check = compute_instance_encryption_with_csek_enabled() result = check.execute() assert len(result) == 1 assert result[0].status == "FAIL" assert search( - f"The VM Instance {instance.name} have the following unencrypted disks: '{', '.join([i[0] for i in instance.disks_encryption if not i[1]])}'", + f"The VM Instance {instance.name} has the following unencrypted disks: '{', '.join([i[0] for i in instance.disks_encryption if not i[1]])}'", result[0].status_extended, ) assert result[0].resource_id == instance.id diff --git a/tests/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled_test.py b/tests/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled_test.py new file mode 100644 index 00000000..a64284b9 --- /dev/null +++ 
diff --git a/tests/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled_test.py b/tests/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled_test.py
new file mode 100644
index 00000000..a64284b9
--- /dev/null
+++ b/tests/providers/gcp/services/compute/compute_instance_confidential_computing_enabled/compute_instance_confidential_computing_enabled_test.py
@@ -0,0 +1,111 @@
+from re import search
+from unittest import mock
+
+GCP_PROJECT_ID = "123456789012"
+
+
+class Test_compute_instance_confidential_computing_enabled:
+    def test_compute_no_instances(self):
+        compute_client = mock.MagicMock
+        compute_client.project_ids = [GCP_PROJECT_ID]
+        compute_client.instances = []
+
+        with mock.patch(
+            "prowler.providers.gcp.services.compute.compute_instance_confidential_computing_enabled.compute_instance_confidential_computing_enabled.compute_client",
+            new=compute_client,
+        ):
+            from prowler.providers.gcp.services.compute.compute_instance_confidential_computing_enabled.compute_instance_confidential_computing_enabled import (
+                compute_instance_confidential_computing_enabled,
+            )
+
+            check = compute_instance_confidential_computing_enabled()
+            result = check.execute()
+            assert len(result) == 0
+
+    def test_one_compliant_instance(self):
+        from prowler.providers.gcp.services.compute.compute_service import Instance
+
+        instance = Instance(
+            name="test",
+            id="1234567890",
+            zone="us-central1-a",
+            public_ip=True,
+            metadata={},
+            shielded_enabled_vtpm=True,
+            shielded_enabled_integrity_monitoring=True,
+            confidential_computing=True,
+            service_accounts=[],
+            ip_forward=False,
+            disks_encryption=[("disk1", False), ("disk2", False)],
+            project_id=GCP_PROJECT_ID,
+        )
+
+        compute_client = mock.MagicMock
+        compute_client.project_ids = [GCP_PROJECT_ID]
+        compute_client.instances = [instance]
+
+        with mock.patch(
+            "prowler.providers.gcp.services.compute.compute_instance_confidential_computing_enabled.compute_instance_confidential_computing_enabled.compute_client",
+            new=compute_client,
+        ):
+            from prowler.providers.gcp.services.compute.compute_instance_confidential_computing_enabled.compute_instance_confidential_computing_enabled import (
+                compute_instance_confidential_computing_enabled,
+            )
+
+            check = compute_instance_confidential_computing_enabled()
+            result = check.execute()
+
+            assert len(result) == 1
+            assert result[0].status == "PASS"
+            assert search(
+                f"VM Instance {instance.name} has Confidential Computing enabled",
+                result[0].status_extended,
+            )
+            assert result[0].resource_id == instance.id
+            assert result[0].resource_name == instance.name
+            assert result[0].location == instance.zone
+            assert result[0].project_id == GCP_PROJECT_ID
+
+    def test_one_instance_with_shielded_vtpm_disabled(self):
+        from prowler.providers.gcp.services.compute.compute_service import Instance
+
+        instance = Instance(
+            name="test",
+            id="1234567890",
+            zone="us-central1-a",
+            public_ip=True,
+            metadata={},
+            shielded_enabled_vtpm=False,
+            shielded_enabled_integrity_monitoring=True,
+            confidential_computing=False,
+            service_accounts=[],
+            ip_forward=False,
+            disks_encryption=[("disk1", False), ("disk2", False)],
+            project_id=GCP_PROJECT_ID,
+        )
+
+        compute_client = mock.MagicMock
+        compute_client.project_ids = [GCP_PROJECT_ID]
+        compute_client.instances = [instance]
+
+        with mock.patch(
+            "prowler.providers.gcp.services.compute.compute_instance_confidential_computing_enabled.compute_instance_confidential_computing_enabled.compute_client",
+            new=compute_client,
+        ):
+            from prowler.providers.gcp.services.compute.compute_instance_confidential_computing_enabled.compute_instance_confidential_computing_enabled import (
+                compute_instance_confidential_computing_enabled,
+            )
+
+            check = compute_instance_confidential_computing_enabled()
+            result = check.execute()
+
+            assert len(result) == 1
+            assert result[0].status == "FAIL"
+            assert search(
+                f"VM Instance {instance.name} does not have Confidential Computing enabled",
+                result[0].status_extended,
+            )
+            assert result[0].resource_id == instance.id
+            assert result[0].resource_name == instance.name
+            assert result[0].location == instance.zone
+            assert result[0].project_id == GCP_PROJECT_ID
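A brief note on the test pattern repeated throughout these files: each check module binds a service singleton (here `compute_client`) at import time, so the tests replace that attribute on the check's own module and only import the check once the patch is active. A minimal standalone illustration, where `mocked_client` is a made-up stand-in for the service objects built in the tests above:

```python
from unittest import mock

mocked_client = mock.MagicMock()  # hypothetical stand-in for the compute service
mocked_client.project_ids = ["123456789012"]
mocked_client.instances = []  # empty inventory, so the check should emit no findings

with mock.patch(
    "prowler.providers.gcp.services.compute.compute_instance_confidential_computing_enabled.compute_instance_confidential_computing_enabled.compute_client",
    new=mocked_client,
):
    # Imported inside the context, so execute() resolves the module-level
    # compute_client name to the mock rather than the real GCP client.
    from prowler.providers.gcp.services.compute.compute_instance_confidential_computing_enabled.compute_instance_confidential_computing_enabled import (
        compute_instance_confidential_computing_enabled,
    )

    assert compute_instance_confidential_computing_enabled().execute() == []
```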
diff --git a/tests/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/compute_ip_forwarding_is_enabled_test.py b/tests/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/compute_ip_forwarding_is_enabled_test.py
index a51f1da9..53740aeb 100644
--- a/tests/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/compute_ip_forwarding_is_enabled_test.py
+++ b/tests/providers/gcp/services/compute/compute_ip_forwarding_is_enabled/compute_ip_forwarding_is_enabled_test.py
@@ -4,20 +4,20 @@ from unittest import mock
 GCP_PROJECT_ID = "123456789012"
 
 
-class Test_compute_ip_forwarding_is_enabled:
+class Test_compute_instance_ip_forwarding_is_enabled:
    def test_compute_no_instances(self):
         compute_client = mock.MagicMock
         compute_client.instances = []
 
         with mock.patch(
-            "prowler.providers.gcp.services.compute.compute_ip_forwarding_is_enabled.compute_ip_forwarding_is_enabled.compute_client",
+            "prowler.providers.gcp.services.compute.compute_instance_ip_forwarding_is_enabled.compute_instance_ip_forwarding_is_enabled.compute_client",
             new=compute_client,
         ):
-            from prowler.providers.gcp.services.compute.compute_ip_forwarding_is_enabled.compute_ip_forwarding_is_enabled import (
-                compute_ip_forwarding_is_enabled,
+            from prowler.providers.gcp.services.compute.compute_instance_ip_forwarding_is_enabled.compute_instance_ip_forwarding_is_enabled import (
+                compute_instance_ip_forwarding_is_enabled,
             )
 
-            check = compute_ip_forwarding_is_enabled()
+            check = compute_instance_ip_forwarding_is_enabled()
             result = check.execute()
             assert len(result) == 0
@@ -32,6 +32,7 @@ class Test_compute_ip_forwarding_is_enabled:
             metadata={},
             shielded_enabled_vtpm=True,
             shielded_enabled_integrity_monitoring=True,
+            confidential_computing=True,
             service_accounts=[{"email": "123-compute@developer.gserviceaccount.com"}],
             ip_forward=False,
             disks_encryption=[("disk1", False), ("disk2", False)],
@@ -43,14 +44,14 @@ class Test_compute_ip_forwarding_is_enabled:
         compute_client.instances = [instance]
 
         with mock.patch(
-            "prowler.providers.gcp.services.compute.compute_ip_forwarding_is_enabled.compute_ip_forwarding_is_enabled.compute_client",
+            "prowler.providers.gcp.services.compute.compute_instance_ip_forwarding_is_enabled.compute_instance_ip_forwarding_is_enabled.compute_client",
             new=compute_client,
         ):
-            from prowler.providers.gcp.services.compute.compute_ip_forwarding_is_enabled.compute_ip_forwarding_is_enabled import (
-                compute_ip_forwarding_is_enabled,
+            from prowler.providers.gcp.services.compute.compute_instance_ip_forwarding_is_enabled.compute_instance_ip_forwarding_is_enabled import (
+                compute_instance_ip_forwarding_is_enabled,
             )
 
-            check = compute_ip_forwarding_is_enabled()
+            check = compute_instance_ip_forwarding_is_enabled()
             result = check.execute()
 
             assert len(result) == 1
@@ -72,6 +73,7 @@ class Test_compute_ip_forwarding_is_enabled:
             metadata={},
             shielded_enabled_vtpm=True,
             shielded_enabled_integrity_monitoring=True,
+            confidential_computing=True,
             service_accounts=[
                 {"email": f"{GCP_PROJECT_ID}-compute@developer.gserviceaccount.com"}
             ],
@@ -85,14 +87,14 @@ class Test_compute_ip_forwarding_is_enabled:
         compute_client.instances = [instance]
 
         with mock.patch(
-            "prowler.providers.gcp.services.compute.compute_ip_forwarding_is_enabled.compute_ip_forwarding_is_enabled.compute_client",
+            "prowler.providers.gcp.services.compute.compute_instance_ip_forwarding_is_enabled.compute_instance_ip_forwarding_is_enabled.compute_client",
             new=compute_client,
         ):
-            from prowler.providers.gcp.services.compute.compute_ip_forwarding_is_enabled.compute_ip_forwarding_is_enabled import (
-                compute_ip_forwarding_is_enabled,
+            from prowler.providers.gcp.services.compute.compute_instance_ip_forwarding_is_enabled.compute_instance_ip_forwarding_is_enabled import (
+                compute_instance_ip_forwarding_is_enabled,
             )
 
-            check = compute_ip_forwarding_is_enabled()
+            check = compute_instance_ip_forwarding_is_enabled()
             result = check.execute()
 
             assert len(result) == 1
@@ -114,6 +116,7 @@ class Test_compute_ip_forwarding_is_enabled:
             metadata={},
             shielded_enabled_vtpm=True,
             shielded_enabled_integrity_monitoring=True,
+            confidential_computing=True,
             service_accounts=[
                 {"email": f"{GCP_PROJECT_ID}-compute@developer.gserviceaccount.com"}
             ],
@@ -127,14 +130,14 @@ class Test_compute_ip_forwarding_is_enabled:
         compute_client.instances = [instance]
 
         with mock.patch(
-            "prowler.providers.gcp.services.compute.compute_ip_forwarding_is_enabled.compute_ip_forwarding_is_enabled.compute_client",
+            "prowler.providers.gcp.services.compute.compute_instance_ip_forwarding_is_enabled.compute_instance_ip_forwarding_is_enabled.compute_client",
             new=compute_client,
         ):
-            from prowler.providers.gcp.services.compute.compute_ip_forwarding_is_enabled.compute_ip_forwarding_is_enabled import (
-                compute_ip_forwarding_is_enabled,
+            from prowler.providers.gcp.services.compute.compute_instance_ip_forwarding_is_enabled.compute_instance_ip_forwarding_is_enabled import (
+                compute_instance_ip_forwarding_is_enabled,
             )
 
-            check = compute_ip_forwarding_is_enabled()
+            check = compute_instance_ip_forwarding_is_enabled()
             result = check.execute()
 
             assert len(result) == 1
diff --git a/tests/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/compute_loadbalancer_logging_enabled_test.py b/tests/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/compute_loadbalancer_logging_enabled_test.py
new file mode 100644
index 00000000..48cf8956
--- /dev/null
+++ b/tests/providers/gcp/services/compute/compute_loadbalancer_logging_enabled/compute_loadbalancer_logging_enabled_test.py
@@ -0,0 +1,99 @@
+from re import search
+from unittest import mock
+
+GCP_PROJECT_ID = "123456789012"
+
+
+class Test_compute_loadbalancer_logging_enabled:
+    def test_compute_no_load_balancers(self):
+        compute_client = mock.MagicMock
+        compute_client.project_ids = [GCP_PROJECT_ID]
+        compute_client.load_balancers = []
+
+        with mock.patch(
+            "prowler.providers.gcp.services.compute.compute_loadbalancer_logging_enabled.compute_loadbalancer_logging_enabled.compute_client",
+            new=compute_client,
+        ):
+            from prowler.providers.gcp.services.compute.compute_loadbalancer_logging_enabled.compute_loadbalancer_logging_enabled import (
+                compute_loadbalancer_logging_enabled,
+            )
+
+            check = compute_loadbalancer_logging_enabled()
+            result = check.execute()
+            assert len(result) == 0
+
+    def test_one_compliant_load_balancer(self):
+        from prowler.providers.gcp.services.compute.compute_service import LoadBalancer
+
+        load_balancer = LoadBalancer(
+            name="test",
+            id="test_id",
+            project_id=GCP_PROJECT_ID,
+            logging=True,
+            service="test",
+        )
+
+        compute_client = mock.MagicMock
+        compute_client.project_ids = [GCP_PROJECT_ID]
+        compute_client.load_balancers = [load_balancer]
+        compute_client.region = "global"
+
+        with mock.patch(
+            "prowler.providers.gcp.services.compute.compute_loadbalancer_logging_enabled.compute_loadbalancer_logging_enabled.compute_client",
+            new=compute_client,
+        ):
+            from prowler.providers.gcp.services.compute.compute_loadbalancer_logging_enabled.compute_loadbalancer_logging_enabled import (
+                compute_loadbalancer_logging_enabled,
+            )
+
+            check = compute_loadbalancer_logging_enabled()
+            result = check.execute()
+
+            assert len(result) == 1
+            assert result[0].status == "PASS"
+            assert search(
+                "has logging enabled",
+                result[0].status_extended,
+            )
+            assert result[0].resource_id == load_balancer.id
+            assert result[0].resource_name == load_balancer.name
+            assert result[0].project_id == GCP_PROJECT_ID
+            assert result[0].location == compute_client.region
+
+    def test_one_uncompliant_load_balancer(self):
+        from prowler.providers.gcp.services.compute.compute_service import LoadBalancer
+
+        load_balancer = LoadBalancer(
+            name="test",
+            id="test_id",
+            project_id=GCP_PROJECT_ID,
+            logging=False,
+            service="test",
+        )
+
+        compute_client = mock.MagicMock
+        compute_client.project_ids = [GCP_PROJECT_ID]
+        compute_client.load_balancers = [load_balancer]
+        compute_client.region = "global"
+
+        with mock.patch(
+            "prowler.providers.gcp.services.compute.compute_loadbalancer_logging_enabled.compute_loadbalancer_logging_enabled.compute_client",
+            new=compute_client,
+        ):
+            from prowler.providers.gcp.services.compute.compute_loadbalancer_logging_enabled.compute_loadbalancer_logging_enabled import (
+                compute_loadbalancer_logging_enabled,
+            )
+
+            check = compute_loadbalancer_logging_enabled()
+            result = check.execute()
+
+            assert len(result) == 1
+            assert result[0].status == "FAIL"
+            assert search(
+                "does not have logging enabled",
+                result[0].status_extended,
+            )
+            assert result[0].resource_id == load_balancer.id
+            assert result[0].resource_name == load_balancer.name
+            assert result[0].project_id == GCP_PROJECT_ID
+            assert result[0].location == compute_client.region
name="test", id="test_id", project_id=GCP_PROJECT_ID, subnet_mode="auto" + ) + + compute_client = mock.MagicMock + compute_client.project_ids = [GCP_PROJECT_ID] + compute_client.networks = [network] + compute_client.region = "global" + + policy = Policy( + name="test", + id="test_id", + logging=True, + networks=["test"], + project_id=GCP_PROJECT_ID, + ) + + dns_client = mock.MagicMock + dns_client.project_ids = [GCP_PROJECT_ID] + dns_client.policies = [policy] + + with mock.patch( + "prowler.providers.gcp.services.compute.compute_network_dns_logging_enabled.compute_network_dns_logging_enabled.compute_client", + new=compute_client, + ): + with mock.patch( + "prowler.providers.gcp.services.compute.compute_network_dns_logging_enabled.compute_network_dns_logging_enabled.dns_client", + new=dns_client, + ): + from prowler.providers.gcp.services.compute.compute_network_dns_logging_enabled.compute_network_dns_logging_enabled import ( + compute_network_dns_logging_enabled, + ) + + check = compute_network_dns_logging_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "has DNS logging enabled", + result[0].status_extended, + ) + assert result[0].resource_id == network.id + assert result[0].resource_name == network.name + assert result[0].project_id == GCP_PROJECT_ID + assert result[0].location == compute_client.region + + def test_one_uncompliant_network(self): + from prowler.providers.gcp.services.compute.compute_service import Network + + network = Network( + name="test", id="test_id", project_id=GCP_PROJECT_ID, subnet_mode="auto" + ) + + compute_client = mock.MagicMock + compute_client.project_ids = [GCP_PROJECT_ID] + compute_client.networks = [network] + compute_client.region = "global" + + policy = Policy( + name="test", + id="test_id", + logging=False, + networks=["test"], + project_id=GCP_PROJECT_ID, + ) + + dns_client = mock.MagicMock + dns_client.project_ids = [GCP_PROJECT_ID] + dns_client.policies = [policy] + + with mock.patch( + "prowler.providers.gcp.services.compute.compute_network_dns_logging_enabled.compute_network_dns_logging_enabled.compute_client", + new=compute_client, + ): + with mock.patch( + "prowler.providers.gcp.services.compute.compute_network_dns_logging_enabled.compute_network_dns_logging_enabled.dns_client", + new=dns_client, + ): + from prowler.providers.gcp.services.compute.compute_network_dns_logging_enabled.compute_network_dns_logging_enabled import ( + compute_network_dns_logging_enabled, + ) + + check = compute_network_dns_logging_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "does not have DNS logging enabled", + result[0].status_extended, + ) + assert result[0].resource_id == network.id + assert result[0].resource_name == network.name + assert result[0].project_id == GCP_PROJECT_ID + assert result[0].location == compute_client.region diff --git a/tests/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy_test.py b/tests/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy_test.py new file mode 100644 index 00000000..234b8315 --- /dev/null +++ b/tests/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy_test.py @@ -0,0 +1,98 @@ +from re import search +from unittest import mock + +GCP_PROJECT_ID = "123456789012" + + +class Test_compute_network_not_legacy: + def test_compute_no_networks(self): + compute_client = mock.MagicMock + 
diff --git a/tests/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy_test.py b/tests/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy_test.py
new file mode 100644
index 00000000..234b8315
--- /dev/null
+++ b/tests/providers/gcp/services/compute/compute_network_not_legacy/compute_network_not_legacy_test.py
@@ -0,0 +1,98 @@
+from re import search
+from unittest import mock
+
+GCP_PROJECT_ID = "123456789012"
+
+
+class Test_compute_network_not_legacy:
+    def test_compute_no_networks(self):
+        compute_client = mock.MagicMock
+        compute_client.project_ids = [GCP_PROJECT_ID]
+        compute_client.networks = []
+        compute_client.region = "global"
+
+        with mock.patch(
+            "prowler.providers.gcp.services.compute.compute_network_not_legacy.compute_network_not_legacy.compute_client",
+            new=compute_client,
+        ):
+            from prowler.providers.gcp.services.compute.compute_network_not_legacy.compute_network_not_legacy import (
+                compute_network_not_legacy,
+            )
+
+            check = compute_network_not_legacy()
+            result = check.execute()
+            assert len(result) == 0
+
+    def test_one_compliant_network(self):
+        from prowler.providers.gcp.services.compute.compute_service import Network
+
+        network = Network(
+            name="test",
+            id="test_id",
+            project_id=GCP_PROJECT_ID,
+            subnet_mode="custom",
+        )
+
+        compute_client = mock.MagicMock
+        compute_client.project_ids = [GCP_PROJECT_ID]
+        compute_client.networks = [network]
+        compute_client.region = "global"
+
+        with mock.patch(
+            "prowler.providers.gcp.services.compute.compute_network_not_legacy.compute_network_not_legacy.compute_client",
+            new=compute_client,
+        ):
+            from prowler.providers.gcp.services.compute.compute_network_not_legacy.compute_network_not_legacy import (
+                compute_network_not_legacy,
+            )
+
+            check = compute_network_not_legacy()
+            result = check.execute()
+
+            assert len(result) == 1
+            assert result[0].status == "PASS"
+            assert search(
+                "Network test is not legacy",
+                result[0].status_extended,
+            )
+            assert result[0].resource_id == network.id
+            assert result[0].resource_name == network.name
+            assert result[0].project_id == GCP_PROJECT_ID
+            assert result[0].location == compute_client.region
+
+    def test_one_legacy_network(self):
+        from prowler.providers.gcp.services.compute.compute_service import Network
+
+        network = Network(
+            name="test",
+            id="test_id",
+            project_id=GCP_PROJECT_ID,
+            subnet_mode="legacy",
+        )
+
+        compute_client = mock.MagicMock
+        compute_client.project_ids = [GCP_PROJECT_ID]
+        compute_client.networks = [network]
+        compute_client.region = "global"
+
+        with mock.patch(
+            "prowler.providers.gcp.services.compute.compute_network_not_legacy.compute_network_not_legacy.compute_client",
+            new=compute_client,
+        ):
+            from prowler.providers.gcp.services.compute.compute_network_not_legacy.compute_network_not_legacy import (
+                compute_network_not_legacy,
+            )
+
+            check = compute_network_not_legacy()
+            result = check.execute()
+
+            assert len(result) == 1
+            assert result[0].status == "FAIL"
+            assert search(
+                "Legacy network test exists",
+                result[0].status_extended,
+            )
+            assert result[0].resource_id == network.id
+            assert result[0].resource_name == network.name
+            assert result[0].project_id == GCP_PROJECT_ID
+            assert result[0].location == compute_client.region
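For completeness, the legacy-network check exercised above reduces to a single attribute comparison. Again a hypothetical sketch: only the `subnet_mode` test and the two message strings are pinned down by the assertions, the surrounding report boilerplate is assumed from the project's usual check shape:

```python
# Hypothetical sketch of compute_network_not_legacy.
from prowler.lib.check.models import Check, Check_Report_GCP
from prowler.providers.gcp.services.compute.compute_client import compute_client


class compute_network_not_legacy(Check):
    def execute(self):
        findings = []
        for network in compute_client.networks:
            report = Check_Report_GCP(self.metadata())
            report.project_id = network.project_id
            report.resource_id = network.id
            report.resource_name = network.name
            report.location = compute_client.region
            # "legacy" networks predate subnets; "auto" and "custom" modes pass.
            if network.subnet_mode == "legacy":
                report.status = "FAIL"
                report.status_extended = f"Legacy network {network.name} exists"
            else:
                report.status = "PASS"
                report.status_extended = f"Network {network.name} is not legacy"
            findings.append(report)
        return findings
```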
"prowler.providers.gcp.services.compute.compute_project_os_login_enabled.compute_project_os_login_enabled.compute_client", + new=compute_client, + ): + from prowler.providers.gcp.services.compute.compute_project_os_login_enabled.compute_project_os_login_enabled import ( + compute_project_os_login_enabled, + ) + + check = compute_project_os_login_enabled() + result = check.execute() + assert len(result) == 0 + + def test_one_compliant_project(self): + from prowler.providers.gcp.services.compute.compute_service import Project + + project = Project( + id=GCP_PROJECT_ID, + enable_oslogin=True, + ) + + compute_client = mock.MagicMock + compute_client.project_ids = [GCP_PROJECT_ID] + compute_client.projects = [project] + + with mock.patch( + "prowler.providers.gcp.services.compute.compute_project_os_login_enabled.compute_project_os_login_enabled.compute_client", + new=compute_client, + ): + from prowler.providers.gcp.services.compute.compute_project_os_login_enabled.compute_project_os_login_enabled import ( + compute_project_os_login_enabled, + ) + + check = compute_project_os_login_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + f"Project {project.id} has OS Login enabled", + result[0].status_extended, + ) + assert result[0].resource_id == project.id + assert result[0].location == "global" + assert result[0].project_id == GCP_PROJECT_ID + + def test_one_non_compliant_project(self): + from prowler.providers.gcp.services.compute.compute_service import Project + + project = Project( + id=GCP_PROJECT_ID, + enable_oslogin=False, + ) + + compute_client = mock.MagicMock + compute_client.project_ids = [GCP_PROJECT_ID] + compute_client.projects = [project] + + with mock.patch( + "prowler.providers.gcp.services.compute.compute_project_os_login_enabled.compute_project_os_login_enabled.compute_client", + new=compute_client, + ): + from prowler.providers.gcp.services.compute.compute_project_os_login_enabled.compute_project_os_login_enabled import ( + compute_project_os_login_enabled, + ) + + check = compute_project_os_login_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + f"Project {project.id} does not have OS Login enabled", + result[0].status_extended, + ) + assert result[0].resource_id == project.id + assert result[0].location == "global" + assert result[0].project_id == GCP_PROJECT_ID diff --git a/tests/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/compute_rdp_access_from_the_internet_allowed_test.py b/tests/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/compute_rdp_access_from_the_internet_allowed_test.py index 28f448fb..50d9225f 100644 --- a/tests/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/compute_rdp_access_from_the_internet_allowed_test.py +++ b/tests/providers/gcp/services/compute/compute_rdp_access_from_the_internet_allowed/compute_rdp_access_from_the_internet_allowed_test.py @@ -4,20 +4,20 @@ from unittest import mock GCP_PROJECT_ID = "123456789012" -class Test_compute_rdp_access_from_the_internet_allowed: +class Test_compute_firewall_rdp_access_from_the_internet_allowed: def test_compute_no_instances(self): compute_client = mock.MagicMock compute_client.firewalls = [] with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + 
"prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 0 @@ -36,16 +36,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -71,16 +72,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -106,16 +108,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + 
"prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -141,16 +144,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -176,16 +180,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -211,16 +216,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + 
"prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -246,16 +252,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -281,16 +288,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -316,16 +324,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + 
"prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -354,16 +363,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -393,16 +403,17 @@ class Test_compute_rdp_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_rdp_access_from_the_internet_allowed.compute_rdp_access_from_the_internet_allowed import ( - compute_rdp_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_rdp_access_from_the_internet_allowed.compute_firewall_rdp_access_from_the_internet_allowed import ( + compute_firewall_rdp_access_from_the_internet_allowed, ) - check = compute_rdp_access_from_the_internet_allowed() + check = compute_firewall_rdp_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 diff --git a/tests/providers/gcp/services/compute/compute_serial_ports_in_use/compute_serial_ports_in_use_test.py b/tests/providers/gcp/services/compute/compute_serial_ports_in_use/compute_serial_ports_in_use_test.py index 14d98621..10ac1446 100644 --- a/tests/providers/gcp/services/compute/compute_serial_ports_in_use/compute_serial_ports_in_use_test.py +++ 
b/tests/providers/gcp/services/compute/compute_serial_ports_in_use/compute_serial_ports_in_use_test.py @@ -4,21 +4,21 @@ from unittest import mock GCP_PROJECT_ID = "123456789012" -class Test_compute_serial_ports_in_use: +class Test_compute_instance_serial_ports_in_use: def test_compute_no_instances(self): compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.instances = [] with mock.patch( - "prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use import ( - compute_serial_ports_in_use, + from prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use import ( + compute_instance_serial_ports_in_use, ) - check = compute_serial_ports_in_use() + check = compute_instance_serial_ports_in_use() result = check.execute() assert len(result) == 0 @@ -33,6 +33,7 @@ class Test_compute_serial_ports_in_use: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -44,20 +45,20 @@ class Test_compute_serial_ports_in_use: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use import ( - compute_serial_ports_in_use, + from prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use import ( + compute_instance_serial_ports_in_use, ) - check = compute_serial_ports_in_use() + check = compute_instance_serial_ports_in_use() result = check.execute() assert len(result) == 1 assert result[0].status == "PASS" assert search( - f"VM Instance {instance.name} have ‘Enable Connecting to Serial Ports’ off", + f"VM Instance {instance.name} has Enable Connecting to Serial Ports off", result[0].status_extended, ) assert result[0].resource_id == instance.id @@ -73,6 +74,7 @@ class Test_compute_serial_ports_in_use: metadata={"items": [{"key": "serial-port-enabled", "value": "0"}]}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -84,20 +86,20 @@ class Test_compute_serial_ports_in_use: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use import ( - compute_serial_ports_in_use, + from prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use import ( + 
compute_instance_serial_ports_in_use, ) - check = compute_serial_ports_in_use() + check = compute_instance_serial_ports_in_use() result = check.execute() assert len(result) == 1 assert result[0].status == "PASS" assert search( - f"VM Instance {instance.name} have ‘Enable Connecting to Serial Ports’ off", + f"VM Instance {instance.name} has Enable Connecting to Serial Ports off", result[0].status_extended, ) assert result[0].resource_id == instance.id @@ -113,6 +115,7 @@ class Test_compute_serial_ports_in_use: metadata={"items": [{"key": "serial-port-enabled", "value": "false"}]}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -124,20 +127,20 @@ class Test_compute_serial_ports_in_use: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use import ( - compute_serial_ports_in_use, + from prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use import ( + compute_instance_serial_ports_in_use, ) - check = compute_serial_ports_in_use() + check = compute_instance_serial_ports_in_use() result = check.execute() assert len(result) == 1 assert result[0].status == "PASS" assert search( - f"VM Instance {instance.name} have ‘Enable Connecting to Serial Ports’ off", + f"VM Instance {instance.name} has Enable Connecting to Serial Ports off", result[0].status_extended, ) assert result[0].resource_id == instance.id @@ -153,6 +156,7 @@ class Test_compute_serial_ports_in_use: metadata={"items": [{"key": "serial-port-enable", "value": "1"}]}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -164,20 +168,20 @@ class Test_compute_serial_ports_in_use: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use import ( - compute_serial_ports_in_use, + from prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use import ( + compute_instance_serial_ports_in_use, ) - check = compute_serial_ports_in_use() + check = compute_instance_serial_ports_in_use() result = check.execute() assert len(result) == 1 assert result[0].status == "FAIL" assert search( - f"VM Instance {instance.name} have ‘Enable Connecting to Serial Ports’ set to on", + f"VM Instance {instance.name} has Enable Connecting to Serial Ports set to on", result[0].status_extended, ) assert result[0].resource_id == instance.id @@ -193,6 +197,7 @@ class Test_compute_serial_ports_in_use: metadata={"items": [{"key": "serial-port-enable", "value": "true"}]}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, 
+ confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -204,20 +209,20 @@ class Test_compute_serial_ports_in_use: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_serial_ports_in_use.compute_serial_ports_in_use import ( - compute_serial_ports_in_use, + from prowler.providers.gcp.services.compute.compute_instance_serial_ports_in_use.compute_instance_serial_ports_in_use import ( + compute_instance_serial_ports_in_use, ) - check = compute_serial_ports_in_use() + check = compute_instance_serial_ports_in_use() result = check.execute() assert len(result) == 1 assert result[0].status == "FAIL" assert search( - f"VM Instance {instance.name} have ‘Enable Connecting to Serial Ports’ set to on", + f"VM Instance {instance.name} has Enable Connecting to Serial Ports set to on", result[0].status_extended, ) assert result[0].resource_id == instance.id diff --git a/tests/providers/gcp/services/compute/compute_shielded_vm_enabled/compute_shielded_vm_enabled_test.py b/tests/providers/gcp/services/compute/compute_shielded_vm_enabled/compute_shielded_vm_enabled_test.py index 51c02a9e..7e134ecf 100644 --- a/tests/providers/gcp/services/compute/compute_shielded_vm_enabled/compute_shielded_vm_enabled_test.py +++ b/tests/providers/gcp/services/compute/compute_shielded_vm_enabled/compute_shielded_vm_enabled_test.py @@ -4,21 +4,21 @@ from unittest import mock GCP_PROJECT_ID = "123456789012" -class Test_compute_shielded_vm_enabled: +class Test_compute_instance_shielded_vm_enabled: def test_compute_no_instances(self): compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.instances = [] with mock.patch( - "prowler.providers.gcp.services.compute.compute_shielded_vm_enabled.compute_shielded_vm_enabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_shielded_vm_enabled.compute_instance_shielded_vm_enabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_shielded_vm_enabled.compute_shielded_vm_enabled import ( - compute_shielded_vm_enabled, + from prowler.providers.gcp.services.compute.compute_instance_shielded_vm_enabled.compute_instance_shielded_vm_enabled import ( + compute_instance_shielded_vm_enabled, ) - check = compute_shielded_vm_enabled() + check = compute_instance_shielded_vm_enabled() result = check.execute() assert len(result) == 0 @@ -33,6 +33,7 @@ class Test_compute_shielded_vm_enabled: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -44,20 +45,20 @@ class Test_compute_shielded_vm_enabled: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_shielded_vm_enabled.compute_shielded_vm_enabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_shielded_vm_enabled.compute_instance_shielded_vm_enabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_shielded_vm_enabled.compute_shielded_vm_enabled import ( - 
compute_shielded_vm_enabled, + from prowler.providers.gcp.services.compute.compute_instance_shielded_vm_enabled.compute_instance_shielded_vm_enabled import ( + compute_instance_shielded_vm_enabled, ) - check = compute_shielded_vm_enabled() + check = compute_instance_shielded_vm_enabled() result = check.execute() assert len(result) == 1 assert result[0].status == "PASS" assert search( - f"VM Instance {instance.name} have vTPM or Integrity Monitoring set to on", + f"VM Instance {instance.name} has vTPM or Integrity Monitoring set to on", result[0].status_extended, ) assert result[0].resource_id == instance.id @@ -73,6 +74,7 @@ class Test_compute_shielded_vm_enabled: metadata={}, shielded_enabled_vtpm=False, shielded_enabled_integrity_monitoring=True, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -84,20 +86,20 @@ class Test_compute_shielded_vm_enabled: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_shielded_vm_enabled.compute_shielded_vm_enabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_shielded_vm_enabled.compute_instance_shielded_vm_enabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_shielded_vm_enabled.compute_shielded_vm_enabled import ( - compute_shielded_vm_enabled, + from prowler.providers.gcp.services.compute.compute_instance_shielded_vm_enabled.compute_instance_shielded_vm_enabled import ( + compute_instance_shielded_vm_enabled, ) - check = compute_shielded_vm_enabled() + check = compute_instance_shielded_vm_enabled() result = check.execute() assert len(result) == 1 assert result[0].status == "FAIL" assert search( - f"VM Instance {instance.name} don't have vTPM and Integrity Monitoring set to on", + f"VM Instance {instance.name} doesn't have vTPM and Integrity Monitoring set to on", result[0].status_extended, ) assert result[0].resource_id == instance.id @@ -113,6 +115,7 @@ class Test_compute_shielded_vm_enabled: metadata={}, shielded_enabled_vtpm=True, shielded_enabled_integrity_monitoring=False, + confidential_computing=True, service_accounts=[], ip_forward=False, disks_encryption=[("disk1", False), ("disk2", False)], @@ -124,20 +127,20 @@ class Test_compute_shielded_vm_enabled: compute_client.instances = [instance] with mock.patch( - "prowler.providers.gcp.services.compute.compute_shielded_vm_enabled.compute_shielded_vm_enabled.compute_client", + "prowler.providers.gcp.services.compute.compute_instance_shielded_vm_enabled.compute_instance_shielded_vm_enabled.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_shielded_vm_enabled.compute_shielded_vm_enabled import ( - compute_shielded_vm_enabled, + from prowler.providers.gcp.services.compute.compute_instance_shielded_vm_enabled.compute_instance_shielded_vm_enabled import ( + compute_instance_shielded_vm_enabled, ) - check = compute_shielded_vm_enabled() + check = compute_instance_shielded_vm_enabled() result = check.execute() assert len(result) == 1 assert result[0].status == "FAIL" assert search( - f"VM Instance {instance.name} don't have vTPM and Integrity Monitoring set to on", + f"VM Instance {instance.name} doesn't have vTPM and Integrity Monitoring set to on", result[0].status_extended, ) assert result[0].resource_id == instance.id diff --git 
a/tests/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/compute_ssh_access_from_the_internet_allowed_test.py b/tests/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/compute_ssh_access_from_the_internet_allowed_test.py index 25ce22f1..892ae55f 100644 --- a/tests/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/compute_ssh_access_from_the_internet_allowed_test.py +++ b/tests/providers/gcp/services/compute/compute_ssh_access_from_the_internet_allowed/compute_ssh_access_from_the_internet_allowed_test.py @@ -4,20 +4,20 @@ from unittest import mock GCP_PROJECT_ID = "123456789012" -class Test_compute_ssh_access_from_the_internet_allowed: +class Test_compute_firewall_ssh_access_from_the_internet_allowed: def test_compute_no_instances(self): compute_client = mock.MagicMock compute_client.firewalls = [] with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 0 @@ -36,16 +36,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -71,16 +72,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + 
"prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -106,16 +108,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -141,16 +144,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -176,16 +180,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + 
"prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -211,16 +216,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -246,16 +252,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -281,16 +288,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + 
"prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -316,16 +324,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -354,16 +363,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + "prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 @@ -393,16 +403,17 @@ class Test_compute_ssh_access_from_the_internet_allowed: compute_client = mock.MagicMock compute_client.project_ids = [GCP_PROJECT_ID] compute_client.firewalls = [firewall] + compute_client.region = "global" with mock.patch( - "prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed.compute_client", + 
"prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed.compute_client", new=compute_client, ): - from prowler.providers.gcp.services.compute.compute_ssh_access_from_the_internet_allowed.compute_ssh_access_from_the_internet_allowed import ( - compute_ssh_access_from_the_internet_allowed, + from prowler.providers.gcp.services.compute.compute_firewall_ssh_access_from_the_internet_allowed.compute_firewall_ssh_access_from_the_internet_allowed import ( + compute_firewall_ssh_access_from_the_internet_allowed, ) - check = compute_ssh_access_from_the_internet_allowed() + check = compute_firewall_ssh_access_from_the_internet_allowed() result = check.execute() assert len(result) == 1 diff --git a/tests/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/compute_subnet_flow_logs_enabled_test.py b/tests/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/compute_subnet_flow_logs_enabled_test.py new file mode 100644 index 00000000..3f51e02d --- /dev/null +++ b/tests/providers/gcp/services/compute/compute_subnet_flow_logs_enabled/compute_subnet_flow_logs_enabled_test.py @@ -0,0 +1,99 @@ +from re import search +from unittest import mock + +GCP_PROJECT_ID = "123456789012" + + +class Test_compute_subnet_flow_logs_enabled: + def test_compute_no_subnets(self): + compute_client = mock.MagicMock + compute_client.project_ids = [GCP_PROJECT_ID] + compute_client.subnets = [] + + with mock.patch( + "prowler.providers.gcp.services.compute.compute_subnet_flow_logs_enabled.compute_subnet_flow_logs_enabled.compute_client", + new=compute_client, + ): + from prowler.providers.gcp.services.compute.compute_subnet_flow_logs_enabled.compute_subnet_flow_logs_enabled import ( + compute_subnet_flow_logs_enabled, + ) + + check = compute_subnet_flow_logs_enabled() + result = check.execute() + assert len(result) == 0 + + def test_one_compliant_subnet(self): + from prowler.providers.gcp.services.compute.compute_service import Subnet + + subnet = Subnet( + name="test", + id="test_id", + project_id=GCP_PROJECT_ID, + flow_logs=True, + network="network", + region="global", + ) + + compute_client = mock.MagicMock + compute_client.project_ids = [GCP_PROJECT_ID] + compute_client.subnets = [subnet] + + with mock.patch( + "prowler.providers.gcp.services.compute.compute_subnet_flow_logs_enabled.compute_subnet_flow_logs_enabled.compute_client", + new=compute_client, + ): + from prowler.providers.gcp.services.compute.compute_subnet_flow_logs_enabled.compute_subnet_flow_logs_enabled import ( + compute_subnet_flow_logs_enabled, + ) + + check = compute_subnet_flow_logs_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "has flow logs enabled", + result[0].status_extended, + ) + assert result[0].resource_id == subnet.id + assert result[0].resource_name == subnet.name + assert result[0].project_id == GCP_PROJECT_ID + assert result[0].location == subnet.region + + def test_one_uncompliant_subnet(self): + from prowler.providers.gcp.services.compute.compute_service import Subnet + + subnet = Subnet( + name="test", + id="test_id", + project_id=GCP_PROJECT_ID, + flow_logs=False, + network="network", + region="global", + ) + + compute_client = mock.MagicMock + compute_client.project_ids = [GCP_PROJECT_ID] + compute_client.subnets = [subnet] + + with mock.patch( + 
"prowler.providers.gcp.services.compute.compute_subnet_flow_logs_enabled.compute_subnet_flow_logs_enabled.compute_client", + new=compute_client, + ): + from prowler.providers.gcp.services.compute.compute_subnet_flow_logs_enabled.compute_subnet_flow_logs_enabled import ( + compute_subnet_flow_logs_enabled, + ) + + check = compute_subnet_flow_logs_enabled() + result = check.execute() + + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "does not have flow logs enabled", + result[0].status_extended, + ) + assert result[0].resource_id == subnet.id + assert result[0].resource_name == subnet.name + assert result[0].project_id == GCP_PROJECT_ID + assert result[0].location == subnet.region diff --git a/tests/providers/gcp/services/dns/dns_dnssec_disabled/dns_dnssec_disabled_test.py b/tests/providers/gcp/services/dns/dns_dnssec_disabled/dns_dnssec_disabled_test.py index 096159cc..9da61495 100644 --- a/tests/providers/gcp/services/dns/dns_dnssec_disabled/dns_dnssec_disabled_test.py +++ b/tests/providers/gcp/services/dns/dns_dnssec_disabled/dns_dnssec_disabled_test.py @@ -63,7 +63,7 @@ class Test_dns_dnssec_disabled: assert len(result) == 1 assert result[0].status == "PASS" assert search( - f"Cloud DNS {managed_zone.name} have DNSSEC enabled.", + f"Cloud DNS {managed_zone.name} has DNSSEC enabled.", result[0].status_extended, ) assert result[0].resource_id == managed_zone.id @@ -110,7 +110,7 @@ class Test_dns_dnssec_disabled: assert len(result) == 1 assert result[0].status == "FAIL" assert search( - f"Cloud DNS {managed_zone.name} doens't have DNSSEC enabled.", + f"Cloud DNS {managed_zone.name} doesn't have DNSSEC enabled.", result[0].status_extended, ) assert result[0].resource_id == managed_zone.id diff --git a/tests/providers/gcp/services/iam/iam_account_access_approval_enabled/iam_account_access_approval_enabled_test.py b/tests/providers/gcp/services/iam/iam_account_access_approval_enabled/iam_account_access_approval_enabled_test.py new file mode 100644 index 00000000..1aa51292 --- /dev/null +++ b/tests/providers/gcp/services/iam/iam_account_access_approval_enabled/iam_account_access_approval_enabled_test.py @@ -0,0 +1,62 @@ +from re import search +from unittest import mock + +from prowler.providers.gcp.services.iam.iam_service import Setting + +GCP_PROJECT_ID = "123456789012" + + +class Test_iam_account_access_approval_enabled: + def test_iam_no_settings(self): + accessapproval_client = mock.MagicMock + accessapproval_client.settings = {} + accessapproval_client.project_ids = [GCP_PROJECT_ID] + accessapproval_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_account_access_approval_enabled.iam_account_access_approval_enabled.accessapproval_client", + new=accessapproval_client, + ): + from prowler.providers.gcp.services.iam.iam_account_access_approval_enabled.iam_account_access_approval_enabled import ( + iam_account_access_approval_enabled, + ) + + check = iam_account_access_approval_enabled() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "does not have Access Approval enabled", + result[0].status_extended, + ) + assert result[0].resource_id == GCP_PROJECT_ID + assert result[0].project_id == GCP_PROJECT_ID + assert result[0].location == "global" + + def test_iam_project_with_settings(self): + accessapproval_client = mock.MagicMock + accessapproval_client.settings = { + GCP_PROJECT_ID: Setting(name="test", project_id=GCP_PROJECT_ID) + } + 
accessapproval_client.project_ids = [GCP_PROJECT_ID] + accessapproval_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_account_access_approval_enabled.iam_account_access_approval_enabled.accessapproval_client", + new=accessapproval_client, + ): + from prowler.providers.gcp.services.iam.iam_account_access_approval_enabled.iam_account_access_approval_enabled import ( + iam_account_access_approval_enabled, + ) + + check = iam_account_access_approval_enabled() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "has Access Approval enabled", + result[0].status_extended, + ) + assert result[0].resource_id == GCP_PROJECT_ID + assert result[0].project_id == GCP_PROJECT_ID + assert result[0].location == "global" diff --git a/tests/providers/gcp/services/iam/iam_audit_logs_enabled/iam_audit_logs_enabled_test.py b/tests/providers/gcp/services/iam/iam_audit_logs_enabled/iam_audit_logs_enabled_test.py new file mode 100644 index 00000000..1b01c0ee --- /dev/null +++ b/tests/providers/gcp/services/iam/iam_audit_logs_enabled/iam_audit_logs_enabled_test.py @@ -0,0 +1,92 @@ +from re import search +from unittest import mock + +GCP_PROJECT_ID = "123456789012" + + +class Test_iam_audit_logs_enabled: + def test_iam_no_projects(self): + cloudresourcemanager_client = mock.MagicMock + cloudresourcemanager_client.projects = [] + cloudresourcemanager_client.project_ids = [GCP_PROJECT_ID] + cloudresourcemanager_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_audit_logs_enabled.iam_audit_logs_enabled.cloudresourcemanager_client", + new=cloudresourcemanager_client, + ): + from prowler.providers.gcp.services.iam.iam_audit_logs_enabled.iam_audit_logs_enabled import ( + iam_audit_logs_enabled, + ) + + check = iam_audit_logs_enabled() + result = check.execute() + assert len(result) == 0 + + def test_compliant_project(self): + from prowler.providers.gcp.services.cloudresourcemanager.cloudresourcemanager_service import ( + Project, + ) + + project1 = Project(id=GCP_PROJECT_ID, audit_logging=True) + + cloudresourcemanager_client = mock.MagicMock + cloudresourcemanager_client.project_ids = [GCP_PROJECT_ID] + cloudresourcemanager_client.projects = [project1] + cloudresourcemanager_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_audit_logs_enabled.iam_audit_logs_enabled.cloudresourcemanager_client", + new=cloudresourcemanager_client, + ): + from prowler.providers.gcp.services.iam.iam_audit_logs_enabled.iam_audit_logs_enabled import ( + iam_audit_logs_enabled, + ) + + check = iam_audit_logs_enabled() + result = check.execute() + + assert len(result) == 1 + for idx, r in enumerate(result): + assert r.status == "PASS" + assert search( + "Audit Logs are enabled for project", + r.status_extended, + ) + assert r.resource_id == GCP_PROJECT_ID + assert r.project_id == GCP_PROJECT_ID + assert r.location == cloudresourcemanager_client.region + + def test_uncompliant_project(self): + from prowler.providers.gcp.services.cloudresourcemanager.cloudresourcemanager_service import ( + Project, + ) + + project1 = Project(id=GCP_PROJECT_ID, audit_logging=False) + + cloudresourcemanager_client = mock.MagicMock + cloudresourcemanager_client.project_ids = [GCP_PROJECT_ID] + cloudresourcemanager_client.projects = [project1] + cloudresourcemanager_client.region = "global" + + with mock.patch( + 
"prowler.providers.gcp.services.iam.iam_audit_logs_enabled.iam_audit_logs_enabled.cloudresourcemanager_client", + new=cloudresourcemanager_client, + ): + from prowler.providers.gcp.services.iam.iam_audit_logs_enabled.iam_audit_logs_enabled import ( + iam_audit_logs_enabled, + ) + + check = iam_audit_logs_enabled() + result = check.execute() + + assert len(result) == 1 + for idx, r in enumerate(result): + assert r.status == "FAIL" + assert search( + "Audit Logs are not enabled for project", + r.status_extended, + ) + assert r.resource_id == GCP_PROJECT_ID + assert r.project_id == GCP_PROJECT_ID + assert r.location == cloudresourcemanager_client.region diff --git a/tests/providers/gcp/services/iam/iam_organization_essential_contacts_configured/iam_organization_essential_contacts_configured_test.py b/tests/providers/gcp/services/iam/iam_organization_essential_contacts_configured/iam_organization_essential_contacts_configured_test.py new file mode 100644 index 00000000..f42856b7 --- /dev/null +++ b/tests/providers/gcp/services/iam/iam_organization_essential_contacts_configured/iam_organization_essential_contacts_configured_test.py @@ -0,0 +1,81 @@ +from re import search +from unittest import mock + +from prowler.providers.gcp.services.iam.iam_service import Organization + +GCP_PROJECT_ID = "123456789012" + + +class Test_iam_organization_essential_contacts_configured: + def test_iam_no_organizations(self): + essentialcontacts_client = mock.MagicMock + essentialcontacts_client.organizations = [] + essentialcontacts_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_organization_essential_contacts_configured.iam_organization_essential_contacts_configured.essentialcontacts_client", + new=essentialcontacts_client, + ): + from prowler.providers.gcp.services.iam.iam_organization_essential_contacts_configured.iam_organization_essential_contacts_configured import ( + iam_organization_essential_contacts_configured, + ) + + check = iam_organization_essential_contacts_configured() + result = check.execute() + assert len(result) == 0 + + def test_iam_org_with_contacts(self): + essentialcontacts_client = mock.MagicMock + essentialcontacts_client.organizations = [ + Organization(id="test_id", name="test", contacts=True) + ] + essentialcontacts_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_organization_essential_contacts_configured.iam_organization_essential_contacts_configured.essentialcontacts_client", + new=essentialcontacts_client, + ): + from prowler.providers.gcp.services.iam.iam_organization_essential_contacts_configured.iam_organization_essential_contacts_configured import ( + iam_organization_essential_contacts_configured, + ) + + check = iam_organization_essential_contacts_configured() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + "has essential contacts configured", + result[0].status_extended, + ) + assert result[0].resource_id == "test_id" + assert result[0].resource_name == "test" + assert result[0].project_id == "test_id" + assert result[0].location == "global" + + def test_iam_org_without_contacts(self): + essentialcontacts_client = mock.MagicMock + essentialcontacts_client.organizations = [ + Organization(id="test_id", name="test", contacts=False) + ] + essentialcontacts_client.region = "global" + + with mock.patch( + 
"prowler.providers.gcp.services.iam.iam_organization_essential_contacts_configured.iam_organization_essential_contacts_configured.essentialcontacts_client", + new=essentialcontacts_client, + ): + from prowler.providers.gcp.services.iam.iam_organization_essential_contacts_configured.iam_organization_essential_contacts_configured import ( + iam_organization_essential_contacts_configured, + ) + + check = iam_organization_essential_contacts_configured() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + "does not have essential contacts configured", + result[0].status_extended, + ) + assert result[0].resource_id == "test_id" + assert result[0].resource_name == "test" + assert result[0].project_id == "test_id" + assert result[0].location == "global" diff --git a/tests/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/iam_role_kms_enforce_separation_of_duties_test.py b/tests/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/iam_role_kms_enforce_separation_of_duties_test.py new file mode 100644 index 00000000..74c12ff0 --- /dev/null +++ b/tests/providers/gcp/services/iam/iam_role_kms_enforce_separation_of_duties/iam_role_kms_enforce_separation_of_duties_test.py @@ -0,0 +1,129 @@ +from re import search +from unittest import mock + +GCP_PROJECT_ID = "123456789012" + + +class Test_iam_role_kms_enforce_separation_of_duties: + def test_iam_no_bindings(self): + cloudresourcemanager_client = mock.MagicMock + cloudresourcemanager_client.bindings = [] + cloudresourcemanager_client.project_ids = [GCP_PROJECT_ID] + cloudresourcemanager_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_role_kms_enforce_separation_of_duties.iam_role_kms_enforce_separation_of_duties.cloudresourcemanager_client", + new=cloudresourcemanager_client, + ): + from prowler.providers.gcp.services.iam.iam_role_kms_enforce_separation_of_duties.iam_role_kms_enforce_separation_of_duties import ( + iam_role_kms_enforce_separation_of_duties, + ) + + check = iam_role_kms_enforce_separation_of_duties() + result = check.execute() + assert len(result) == 1 + for idx, r in enumerate(result): + assert r.status == "PASS" + assert search( + "Principle of separation of duties was enforced for KMS-Related Roles", + r.status_extended, + ) + assert r.resource_id == GCP_PROJECT_ID + assert r.project_id == GCP_PROJECT_ID + assert r.location == cloudresourcemanager_client.region + + def test_three_compliant_binding(self): + from prowler.providers.gcp.services.cloudresourcemanager.cloudresourcemanager_service import ( + Binding, + ) + + binding1 = Binding( + role="roles/cloudfunctions.serviceAgent", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + binding2 = Binding( + role="roles/compute.serviceAgent", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + binding3 = Binding( + role="roles/connectors.managedZoneViewer", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + + cloudresourcemanager_client = mock.MagicMock + cloudresourcemanager_client.project_ids = [GCP_PROJECT_ID] + cloudresourcemanager_client.bindings = [binding1, binding2, binding3] + cloudresourcemanager_client.region = "global" + + with mock.patch( + 
"prowler.providers.gcp.services.iam.iam_role_kms_enforce_separation_of_duties.iam_role_kms_enforce_separation_of_duties.cloudresourcemanager_client", + new=cloudresourcemanager_client, + ): + from prowler.providers.gcp.services.iam.iam_role_kms_enforce_separation_of_duties.iam_role_kms_enforce_separation_of_duties import ( + iam_role_kms_enforce_separation_of_duties, + ) + + check = iam_role_kms_enforce_separation_of_duties() + result = check.execute() + + assert len(result) == 1 + for idx, r in enumerate(result): + assert r.status == "PASS" + assert search( + "Principle of separation of duties was enforced for KMS-Related Roles", + r.status_extended, + ) + assert r.resource_id == GCP_PROJECT_ID + assert r.project_id == GCP_PROJECT_ID + assert r.location == cloudresourcemanager_client.region + + def test_uncompliant_binding(self): + from prowler.providers.gcp.services.cloudresourcemanager.cloudresourcemanager_service import ( + Binding, + ) + + binding1 = Binding( + role="roles/cloudkms.admin", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + binding2 = Binding( + role="roles/cloudkms.cryptoKeyEncrypterDecrypter", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + binding3 = Binding( + role="roles/connectors.managedZoneViewer", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + + cloudresourcemanager_client = mock.MagicMock + cloudresourcemanager_client.project_ids = [GCP_PROJECT_ID] + cloudresourcemanager_client.bindings = [binding1, binding2, binding3] + cloudresourcemanager_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_role_kms_enforce_separation_of_duties.iam_role_kms_enforce_separation_of_duties.cloudresourcemanager_client", + new=cloudresourcemanager_client, + ): + from prowler.providers.gcp.services.iam.iam_role_kms_enforce_separation_of_duties.iam_role_kms_enforce_separation_of_duties import ( + iam_role_kms_enforce_separation_of_duties, + ) + + check = iam_role_kms_enforce_separation_of_duties() + result = check.execute() + + assert len(result) == 1 + for idx, r in enumerate(result): + assert r.status == "FAIL" + assert search( + "Principle of separation of duties was not enforced for KMS-Related Roles", + r.status_extended, + ) + assert r.resource_id == GCP_PROJECT_ID + assert r.project_id == GCP_PROJECT_ID + assert r.location == cloudresourcemanager_client.region diff --git a/tests/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/iam_role_sa_enforce_separation_of_duties_test.py b/tests/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/iam_role_sa_enforce_separation_of_duties_test.py new file mode 100644 index 00000000..78e9cc46 --- /dev/null +++ b/tests/providers/gcp/services/iam/iam_role_sa_enforce_separation_of_duties/iam_role_sa_enforce_separation_of_duties_test.py @@ -0,0 +1,129 @@ +from re import search +from unittest import mock + +GCP_PROJECT_ID = "123456789012" + + +class Test_iam_role_sa_enforce_separation_of_duties: + def test_iam_no_bindings(self): + cloudresourcemanager_client = mock.MagicMock + cloudresourcemanager_client.bindings = [] + cloudresourcemanager_client.project_ids = [GCP_PROJECT_ID] + cloudresourcemanager_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_role_sa_enforce_separation_of_duties.iam_role_sa_enforce_separation_of_duties.cloudresourcemanager_client", + 
new=cloudresourcemanager_client, + ): + from prowler.providers.gcp.services.iam.iam_role_sa_enforce_separation_of_duties.iam_role_sa_enforce_separation_of_duties import ( + iam_role_sa_enforce_separation_of_duties, + ) + + check = iam_role_sa_enforce_separation_of_duties() + result = check.execute() + assert len(result) == 1 + for idx, r in enumerate(result): + assert r.status == "PASS" + assert search( + "Principle of separation of duties was enforced for Service-Account Related Roles", + r.status_extended, + ) + assert r.resource_id == GCP_PROJECT_ID + assert r.project_id == GCP_PROJECT_ID + assert r.location == cloudresourcemanager_client.region + + def test_three_compliant_binding(self): + from prowler.providers.gcp.services.cloudresourcemanager.cloudresourcemanager_service import ( + Binding, + ) + + binding1 = Binding( + role="roles/cloudfunctions.serviceAgent", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + binding2 = Binding( + role="roles/compute.serviceAgent", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + binding3 = Binding( + role="roles/connectors.managedZoneViewer", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + + cloudresourcemanager_client = mock.MagicMock + cloudresourcemanager_client.project_ids = [GCP_PROJECT_ID] + cloudresourcemanager_client.bindings = [binding1, binding2, binding3] + cloudresourcemanager_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_role_sa_enforce_separation_of_duties.iam_role_sa_enforce_separation_of_duties.cloudresourcemanager_client", + new=cloudresourcemanager_client, + ): + from prowler.providers.gcp.services.iam.iam_role_sa_enforce_separation_of_duties.iam_role_sa_enforce_separation_of_duties import ( + iam_role_sa_enforce_separation_of_duties, + ) + + check = iam_role_sa_enforce_separation_of_duties() + result = check.execute() + + assert len(result) == 1 + for idx, r in enumerate(result): + assert r.status == "PASS" + assert search( + "Principle of separation of duties was enforced for Service-Account Related Roles", + r.status_extended, + ) + assert r.resource_id == GCP_PROJECT_ID + assert r.project_id == GCP_PROJECT_ID + assert r.location == cloudresourcemanager_client.region + + def test_one_uncompliant_binding(self): + from prowler.providers.gcp.services.cloudresourcemanager.cloudresourcemanager_service import ( + Binding, + ) + + binding1 = Binding( + role="roles/iam.serviceAccountUser", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + binding2 = Binding( + role="roles/compute.serviceAgent", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + binding3 = Binding( + role="roles/connectors.managedZoneViewer", + members=["serviceAccount:685829395199@cloudbuild.gserviceaccount.com"], + project_id=GCP_PROJECT_ID, + ) + + cloudresourcemanager_client = mock.MagicMock + cloudresourcemanager_client.project_ids = [GCP_PROJECT_ID] + cloudresourcemanager_client.bindings = [binding1, binding2, binding3] + cloudresourcemanager_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.iam.iam_role_sa_enforce_separation_of_duties.iam_role_sa_enforce_separation_of_duties.cloudresourcemanager_client", + new=cloudresourcemanager_client, + ): + from 
prowler.providers.gcp.services.iam.iam_role_sa_enforce_separation_of_duties.iam_role_sa_enforce_separation_of_duties import ( + iam_role_sa_enforce_separation_of_duties, + ) + + check = iam_role_sa_enforce_separation_of_duties() + result = check.execute() + + assert len(result) == 1 + for idx, r in enumerate(result): + assert r.status == "FAIL" + assert search( + "Principle of separation of duties was not enforced for Service-Account Related Roles", + r.status_extended, + ) + assert r.resource_id == GCP_PROJECT_ID + assert r.project_id == GCP_PROJECT_ID + assert r.location == cloudresourcemanager_client.region diff --git a/tests/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/serviceusage_cloudasset_inventory_enabled_test.py b/tests/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/serviceusage_cloudasset_inventory_enabled_test.py new file mode 100644 index 00000000..026e9f83 --- /dev/null +++ b/tests/providers/gcp/services/serviceusage/serviceusage_cloudasset_inventory_enabled/serviceusage_cloudasset_inventory_enabled_test.py @@ -0,0 +1,70 @@ +from re import search +from unittest import mock + +from prowler.providers.gcp.services.serviceusage.serviceusage_service import Service + +GCP_PROJECT_ID = "123456789012" + + +class Test_serviceusage_cloudasset_inventory_enabled: + def test_serviceusage_no_active_services(self): + serviceusage_client = mock.MagicMock + serviceusage_client.active_services = {} + serviceusage_client.project_ids = [GCP_PROJECT_ID] + serviceusage_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.serviceusage.serviceusage_cloudasset_inventory_enabled.serviceusage_cloudasset_inventory_enabled.serviceusage_client", + new=serviceusage_client, + ): + from prowler.providers.gcp.services.serviceusage.serviceusage_cloudasset_inventory_enabled.serviceusage_cloudasset_inventory_enabled import ( + serviceusage_cloudasset_inventory_enabled, + ) + + check = serviceusage_cloudasset_inventory_enabled() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "FAIL" + assert search( + f"Cloud Asset Inventory is not enabled in project {GCP_PROJECT_ID}", + result[0].status_extended, + ) + assert result[0].resource_id == "cloudasset.googleapis.com" + assert result[0].project_id == GCP_PROJECT_ID + assert result[0].resource_name == "Cloud Asset Inventory" + assert result[0].location == serviceusage_client.region + + def test_serviceusage_active_cloudasset(self): + serviceusage_client = mock.MagicMock + serviceusage_client.active_services = { + GCP_PROJECT_ID: [ + Service( + name="cloudasset.googleapis.com", + title="Cloud Asset Inventory", + project_id=GCP_PROJECT_ID, + ) + ] + } + serviceusage_client.project_ids = [GCP_PROJECT_ID] + serviceusage_client.region = "global" + + with mock.patch( + "prowler.providers.gcp.services.serviceusage.serviceusage_cloudasset_inventory_enabled.serviceusage_cloudasset_inventory_enabled.serviceusage_client", + new=serviceusage_client, + ): + from prowler.providers.gcp.services.serviceusage.serviceusage_cloudasset_inventory_enabled.serviceusage_cloudasset_inventory_enabled import ( + serviceusage_cloudasset_inventory_enabled, + ) + + check = serviceusage_cloudasset_inventory_enabled() + result = check.execute() + assert len(result) == 1 + assert result[0].status == "PASS" + assert search( + f"Cloud Asset Inventory is enabled in project {GCP_PROJECT_ID}", + result[0].status_extended, + ) + assert result[0].resource_id == 
"cloudasset.googleapis.com" + assert result[0].project_id == GCP_PROJECT_ID + assert result[0].resource_name == "Cloud Asset Inventory" + assert result[0].location == serviceusage_client.region diff --git a/util/generate_compliance_json_from_csv_for_cis20_gcp.py b/util/generate_compliance_json_from_csv_for_cis20_gcp.py new file mode 100644 index 00000000..102b5366 --- /dev/null +++ b/util/generate_compliance_json_from_csv_for_cis20_gcp.py @@ -0,0 +1,40 @@ +import csv +import json +import sys + +# Convert a CSV file following the CIS 1.5 AWS benchmark into a Prowler v3.0 Compliance JSON file +# CSV fields: +# Id, Title,Checks,Attributes_Section,Attributes_Level,Attributes_AssessmentStatus,Attributes_Description,Attributes_RationalStatement,Attributes_ImpactStatement,Attributes_RemediationProcedure,Attributes_AuditProcedure,Attributes_AdditionalInformation,Attributes_References + +# get the CSV filename to convert from +file_name = sys.argv[1] + +# read the CSV file rows and use the column fields to form the Prowler compliance JSON file 'ens_rd2022_aws.json' +output = {"Framework": "CIS-GCP", "Version": "2.0", "Requirements": []} +with open(file_name, newline="", encoding="utf-8") as f: + reader = csv.reader(f, delimiter=",") + for row in reader: + attribute = { + "Section": row[0], + "Profile": row[2], + "AssessmentStatus": row[6], + "Description": row[9], + "RationaleStatement": row[10], + "ImpactStatement": row[11], + "RemediationProcedure": row[12], + "AuditProcedure": row[13], + "AdditionalInformation": row[14], + "References": row[28], + } + output["Requirements"].append( + { + "Id": row[1], + "Description": row[9], + "Checks": list(map(str.strip, row[4].split(","))), + "Attributes": [attribute], + } + ) + +# Write the output Prowler compliance JSON file 'cis_2.0_gcp.json' locally +with open("cis_2.0_gcp.json", "w", encoding="utf-8") as outfile: + json.dump(output, outfile, indent=4, ensure_ascii=False)