diff --git a/prowler/__main__.py b/prowler/__main__.py
index e572c5ed..64abaf62 100644
--- a/prowler/__main__.py
+++ b/prowler/__main__.py
@@ -23,14 +23,13 @@ from prowler.lib.check.compliance import update_checks_metadata_with_compliance
from prowler.lib.cli.parser import ProwlerArgumentParser
from prowler.lib.logger import logger, set_logging_config
from prowler.lib.outputs.outputs import (
- add_html_footer,
- close_json,
- display_compliance_table,
- display_summary_table,
extract_findings_statistics,
- fill_html_overview_statistics,
send_to_s3_bucket,
)
+from prowler.lib.outputs.compliance import display_compliance_table
+from prowler.lib.outputs.html import add_html_footer, fill_html_overview_statistics
+from prowler.lib.outputs.json import close_json
+from prowler.lib.outputs.summary_table import display_summary_table
from prowler.providers.aws.lib.allowlist.allowlist import parse_allowlist_file
from prowler.providers.aws.lib.quick_inventory.quick_inventory import quick_inventory
from prowler.providers.aws.lib.security_hub.security_hub import (
diff --git a/prowler/lib/cli/__init__.py b/prowler/lib/cli/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/prowler/lib/outputs/compliance.py b/prowler/lib/outputs/compliance.py
new file mode 100644
index 00000000..50ea4fda
--- /dev/null
+++ b/prowler/lib/outputs/compliance.py
@@ -0,0 +1,322 @@
+import sys
+from csv import DictWriter
+
+from colorama import Fore, Style
+from tabulate import tabulate
+
+from prowler.config.config import timestamp
+from prowler.lib.logger import logger
+from prowler.lib.outputs.models import (
+ Check_Output_CSV_CIS,
+ Check_Output_CSV_ENS_RD2022,
+ generate_csv_fields,
+)
+
+
+def fill_compliance(output_options, finding, audit_info, file_descriptors):
+ # We have to retrieve all the check's compliance requirements
+ check_compliance = output_options.bulk_checks_metadata[
+ finding.check_metadata.CheckID
+ ].Compliance
+ csv_header = compliance_row = None
+ for compliance in check_compliance:
+ if (
+ compliance.Framework == "ENS"
+ and compliance.Version == "RD2022"
+ and "ens_rd2022_aws" in output_options.output_modes
+ ):
+ for requirement in compliance.Requirements:
+ requirement_description = requirement.Description
+ requirement_id = requirement.Id
+ for attribute in requirement.Attributes:
+ compliance_row = Check_Output_CSV_ENS_RD2022(
+ Provider=finding.check_metadata.Provider,
+ AccountId=audit_info.audited_account,
+ Region=finding.region,
+ AssessmentDate=timestamp.isoformat(),
+ Requirements_Id=requirement_id,
+ Requirements_Description=requirement_description,
+ Requirements_Attributes_IdGrupoControl=attribute.get(
+ "IdGrupoControl"
+ ),
+ Requirements_Attributes_Marco=attribute.get("Marco"),
+ Requirements_Attributes_Categoria=attribute.get("Categoria"),
+ Requirements_Attributes_DescripcionControl=attribute.get(
+ "DescripcionControl"
+ ),
+ Requirements_Attributes_Nivel=attribute.get("Nivel"),
+ Requirements_Attributes_Tipo=attribute.get("Tipo"),
+ Requirements_Attributes_Dimensiones=",".join(
+ attribute.get("Dimensiones")
+ ),
+ Status=finding.status,
+ StatusExtended=finding.status_extended,
+ ResourceId=finding.resource_id,
+ CheckId=finding.check_metadata.CheckID,
+ )
+
+ csv_header = generate_csv_fields(Check_Output_CSV_ENS_RD2022)
+
+ elif compliance.Framework == "CIS-AWS" and "cis" in str(
+ output_options.output_modes
+ ):
+ for requirement in compliance.Requirements:
+ requirement_description = requirement.Description
+ requirement_id = requirement.Id
+ for attribute in requirement.Attributes:
+ compliance_row = Check_Output_CSV_CIS(
+ Provider=finding.check_metadata.Provider,
+ AccountId=audit_info.audited_account,
+ Region=finding.region,
+ AssessmentDate=timestamp.isoformat(),
+ Requirements_Id=requirement_id,
+ Requirements_Description=requirement_description,
+ Requirements_Attributes_Section=attribute.get("Section"),
+ Requirements_Attributes_Profile=attribute.get("Profile"),
+ Requirements_Attributes_AssessmentStatus=attribute.get(
+ "AssessmentStatus"
+ ),
+ Requirements_Attributes_Description=attribute.get(
+ "Description"
+ ),
+ Requirements_Attributes_RationaleStatement=attribute.get(
+ "RationaleStatement"
+ ),
+ Requirements_Attributes_ImpactStatement=attribute.get(
+ "ImpactStatement"
+ ),
+ Requirements_Attributes_RemediationProcedure=attribute.get(
+ "RemediationProcedure"
+ ),
+ Requirements_Attributes_AuditProcedure=attribute.get(
+ "AuditProcedure"
+ ),
+ Requirements_Attributes_AdditionalInformation=attribute.get(
+ "AdditionalInformation"
+ ),
+ Requirements_Attributes_References=attribute.get("References"),
+ Status=finding.status,
+ StatusExtended=finding.status_extended,
+ ResourceId=finding.resource_id,
+ CheckId=finding.check_metadata.CheckID,
+ )
+
+ csv_header = generate_csv_fields(Check_Output_CSV_CIS)
+
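+    # Write the last compliance row built above; the compliance CSV descriptor
+    # is assumed to be registered under the last entry of output_options.output_modes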
+ if compliance_row:
+ csv_writer = DictWriter(
+ file_descriptors[output_options.output_modes[-1]],
+ fieldnames=csv_header,
+ delimiter=";",
+ )
+ csv_writer.writerow(compliance_row.__dict__)
+
+
+def display_compliance_table(
+ findings: list,
+ bulk_checks_metadata: dict,
+ compliance_framework: str,
+ output_filename: str,
+ output_directory: str,
+):
+ try:
+ if "ens_rd2022_aws" in compliance_framework:
+ marcos = {}
+ ens_compliance_table = {
+ "Proveedor": [],
+ "Marco/Categoria": [],
+ "Estado": [],
+ "PYTEC": [],
+ "Alto": [],
+ "Medio": [],
+ "Bajo": [],
+ }
+ pass_count = fail_count = 0
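+            # Aggregate ENS pass/fail counts per Marco/Categoria across all findings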
+ for finding in findings:
+ check = bulk_checks_metadata[finding.check_metadata.CheckID]
+ check_compliances = check.Compliance
+ for compliance in check_compliances:
+ if (
+ compliance.Framework == "ENS"
+ and compliance.Provider == "AWS"
+ and compliance.Version == "RD2022"
+ ):
+ compliance_version = compliance.Version
+ compliance_fm = compliance.Framework
+ compliance_provider = compliance.Provider
+ for requirement in compliance.Requirements:
+ for attribute in requirement.Attributes:
+ marco_categoria = (
+ f"{attribute['Marco']}/{attribute['Categoria']}"
+ )
+ # Check if Marco/Categoria exists
+ if marco_categoria not in marcos:
+ marcos[marco_categoria] = {
+ "Estado": f"{Fore.GREEN}CUMPLE{Style.RESET_ALL}",
+ "Pytec": 0,
+ "Alto": 0,
+ "Medio": 0,
+ "Bajo": 0,
+ }
+ if finding.status == "FAIL":
+ fail_count += 1
+ marcos[marco_categoria][
+ "Estado"
+ ] = f"{Fore.RED}NO CUMPLE{Style.RESET_ALL}"
+ elif finding.status == "PASS":
+ pass_count += 1
+ if attribute["Nivel"] == "pytec":
+ marcos[marco_categoria]["Pytec"] += 1
+ elif attribute["Nivel"] == "alto":
+ marcos[marco_categoria]["Alto"] += 1
+ elif attribute["Nivel"] == "medio":
+ marcos[marco_categoria]["Medio"] += 1
+ elif attribute["Nivel"] == "bajo":
+ marcos[marco_categoria]["Bajo"] += 1
+
+ # Add results to table
+ for marco in marcos:
+ ens_compliance_table["Proveedor"].append("aws")
+ ens_compliance_table["Marco/Categoria"].append(marco)
+ ens_compliance_table["Estado"].append(marcos[marco]["Estado"])
+ ens_compliance_table["PYTEC"].append(
+ f"{Fore.LIGHTRED_EX}{marcos[marco]['Pytec']}{Style.RESET_ALL}"
+ )
+ ens_compliance_table["Alto"].append(
+ f"{Fore.RED}{marcos[marco]['Alto']}{Style.RESET_ALL}"
+ )
+ ens_compliance_table["Medio"].append(
+ f"{Fore.YELLOW}{marcos[marco]['Medio']}{Style.RESET_ALL}"
+ )
+ ens_compliance_table["Bajo"].append(
+ f"{Fore.BLUE}{marcos[marco]['Bajo']}{Style.RESET_ALL}"
+ )
+            if fail_count + pass_count == 0:
+ print(
+ f"\n {Style.BRIGHT}There are no resources for {Fore.YELLOW}{compliance_fm} {compliance_version} - {compliance_provider}{Style.RESET_ALL}.\n"
+ )
+ else:
+ print(
+ f"\nEstado de Cumplimiento de {Fore.YELLOW}{compliance_fm} {compliance_version} - {compliance_provider}{Style.RESET_ALL}:"
+ )
+ overview_table = [
+ [
+ f"{Fore.RED}{round(fail_count/(fail_count+pass_count)*100, 2)}% ({fail_count}) NO CUMPLE{Style.RESET_ALL}",
+ f"{Fore.GREEN}{round(pass_count/(fail_count+pass_count)*100, 2)}% ({pass_count}) CUMPLE{Style.RESET_ALL}",
+ ]
+ ]
+ print(tabulate(overview_table, tablefmt="rounded_grid"))
+ print(
+ f"\nResultados de {Fore.YELLOW}{compliance_fm} {compliance_version} - {compliance_provider}{Style.RESET_ALL}:"
+ )
+ print(
+ tabulate(
+ ens_compliance_table, headers="keys", tablefmt="rounded_grid"
+ )
+ )
+ print(
+ f"{Style.BRIGHT}* Solo aparece el Marco/Categoria que contiene resultados.{Style.RESET_ALL}"
+ )
+ print("\nResultados detallados en:")
+ print(
+ f" - CSV: {output_directory}/{output_filename}_{compliance_framework[0]}.csv\n"
+ )
+ if "cis" in str(compliance_framework):
+ sections = {}
+ cis_compliance_table = {
+ "Provider": [],
+ "Section": [],
+ "Level 1": [],
+ "Level 2": [],
+ }
+ pass_count = fail_count = 0
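+            # Aggregate CIS pass/fail counts per Section, split by Profile level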
+ for finding in findings:
+ check = bulk_checks_metadata[finding.check_metadata.CheckID]
+ check_compliances = check.Compliance
+ for compliance in check_compliances:
+ if compliance.Framework == "CIS-AWS" and compliance.Version in str(
+ compliance_framework
+ ):
+ compliance_version = compliance.Version
+ compliance_fm = compliance.Framework
+ for requirement in compliance.Requirements:
+ for attribute in requirement.Attributes:
+ section = attribute["Section"]
+ # Check if Section exists
+ if section not in sections:
+ sections[section] = {
+ "Status": f"{Fore.GREEN}PASS{Style.RESET_ALL}",
+ "Level 1": {"FAIL": 0, "PASS": 0},
+ "Level 2": {"FAIL": 0, "PASS": 0},
+ }
+ if finding.status == "FAIL":
+ fail_count += 1
+ elif finding.status == "PASS":
+ pass_count += 1
+ if attribute["Profile"] == "Level 1":
+ if finding.status == "FAIL":
+ sections[section]["Level 1"]["FAIL"] += 1
+ else:
+ sections[section]["Level 1"]["PASS"] += 1
+ elif attribute["Profile"] == "Level 2":
+ if finding.status == "FAIL":
+ sections[section]["Level 2"]["FAIL"] += 1
+ else:
+ sections[section]["Level 2"]["PASS"] += 1
+
+ # Add results to table
+ sections = dict(sorted(sections.items()))
+ for section in sections:
+ cis_compliance_table["Provider"].append("aws")
+ cis_compliance_table["Section"].append(section)
+ if sections[section]["Level 1"]["FAIL"] > 0:
+ cis_compliance_table["Level 1"].append(
+ f"{Fore.RED}FAIL({sections[section]['Level 1']['FAIL']}){Style.RESET_ALL}"
+ )
+ else:
+ cis_compliance_table["Level 1"].append(
+ f"{Fore.GREEN}PASS({sections[section]['Level 1']['PASS']}){Style.RESET_ALL}"
+ )
+ if sections[section]["Level 2"]["FAIL"] > 0:
+ cis_compliance_table["Level 2"].append(
+ f"{Fore.RED}FAIL({sections[section]['Level 2']['FAIL']}){Style.RESET_ALL}"
+ )
+ else:
+ cis_compliance_table["Level 2"].append(
+ f"{Fore.GREEN}PASS({sections[section]['Level 2']['PASS']}){Style.RESET_ALL}"
+ )
+            if fail_count + pass_count == 0:
+ print(
+ f"\n {Style.BRIGHT}There are no resources for {Fore.YELLOW}{compliance_fm}-{compliance_version}{Style.RESET_ALL}.\n"
+ )
+ else:
+ print(
+ f"\nCompliance Status of {Fore.YELLOW}{compliance_fm}-{compliance_version}{Style.RESET_ALL} Framework:"
+ )
+ overview_table = [
+ [
+ f"{Fore.RED}{round(fail_count/(fail_count+pass_count)*100, 2)}% ({fail_count}) FAIL{Style.RESET_ALL}",
+ f"{Fore.GREEN}{round(pass_count/(fail_count+pass_count)*100, 2)}% ({pass_count}) PASS{Style.RESET_ALL}",
+ ]
+ ]
+ print(tabulate(overview_table, tablefmt="rounded_grid"))
+ print(
+ f"\nFramework {Fore.YELLOW}{compliance_fm}-{compliance_version}{Style.RESET_ALL} Results:"
+ )
+ print(
+ tabulate(
+ cis_compliance_table, headers="keys", tablefmt="rounded_grid"
+ )
+ )
+ print(
+ f"{Style.BRIGHT}* Only sections containing results appear.{Style.RESET_ALL}"
+ )
+ print("\nDetailed Results in:")
+ print(
+ f" - CSV: {output_directory}/{output_filename}_{compliance_framework[0]}.csv\n"
+ )
+ except Exception as error:
+ logger.critical(
+ f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
+ )
+ sys.exit()
diff --git a/prowler/lib/outputs/file_descriptors.py b/prowler/lib/outputs/file_descriptors.py
new file mode 100644
index 00000000..87bf66bb
--- /dev/null
+++ b/prowler/lib/outputs/file_descriptors.py
@@ -0,0 +1,140 @@
+from csv import DictWriter
+from io import TextIOWrapper
+from typing import Any
+
+from prowler.config.config import (
+ csv_file_suffix,
+ html_file_suffix,
+ json_asff_file_suffix,
+ json_file_suffix,
+)
+from prowler.lib.logger import logger
+from prowler.lib.outputs.html import add_html_header
+from prowler.lib.outputs.models import (
+ Aws_Check_Output_CSV,
+ Azure_Check_Output_CSV,
+ Check_Output_CSV_CIS,
+ Check_Output_CSV_ENS_RD2022,
+ generate_csv_fields,
+)
+from prowler.lib.utils.utils import open_file
+from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
+from prowler.providers.azure.lib.audit_info.models import Azure_Audit_Info
+
+
+def initialize_file_descriptor(
+ filename: str,
+ output_mode: str,
+ audit_info: AWS_Audit_Info,
+ format: Any = None,
+) -> TextIOWrapper:
+ """Open/Create the output file. If needed include headers or the required format"""
+ try:
+        file_descriptor = open_file(
+            filename,
+            "a",
+        )
+
+ if output_mode in ("csv", "ens_rd2022_aws", "cis_1.5_aws", "cis_1.4_aws"):
+            # "format" is the output model class whose fields define the CSV headers
+ csv_header = [x.upper() for x in generate_csv_fields(format)]
+ csv_writer = DictWriter(
+ file_descriptor, fieldnames=csv_header, delimiter=";"
+ )
+ csv_writer.writeheader()
+
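+        # JSON outputs are built incrementally as a list: open the bracket here;
+        # close_json() later replaces the trailing comma with the closing bracket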
+ if output_mode in ("json", "json-asff"):
+ file_descriptor.write("[")
+ if "html" in output_mode:
+ add_html_header(file_descriptor, audit_info)
+ except Exception as error:
+ logger.error(
+ f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+ )
+
+ return file_descriptor
+
+
+def fill_file_descriptors(output_modes, output_directory, output_filename, audit_info):
+ try:
+ file_descriptors = {}
+ if output_modes:
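+            # Open one output file per requested mode and keep its descriptor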
+ for output_mode in output_modes:
+ if output_mode == "csv":
+ filename = f"{output_directory}/{output_filename}{csv_file_suffix}"
+ if isinstance(audit_info, AWS_Audit_Info):
+ file_descriptor = initialize_file_descriptor(
+ filename,
+ output_mode,
+ audit_info,
+ Aws_Check_Output_CSV,
+ )
+ if isinstance(audit_info, Azure_Audit_Info):
+ file_descriptor = initialize_file_descriptor(
+ filename,
+ output_mode,
+ audit_info,
+ Azure_Check_Output_CSV,
+ )
+ file_descriptors.update({output_mode: file_descriptor})
+
+ if output_mode == "json":
+ filename = f"{output_directory}/{output_filename}{json_file_suffix}"
+ file_descriptor = initialize_file_descriptor(
+ filename, output_mode, audit_info
+ )
+ file_descriptors.update({output_mode: file_descriptor})
+
+ if isinstance(audit_info, AWS_Audit_Info):
+
+ if output_mode == "json-asff":
+ filename = f"{output_directory}/{output_filename}{json_asff_file_suffix}"
+ file_descriptor = initialize_file_descriptor(
+ filename, output_mode, audit_info
+ )
+ file_descriptors.update({output_mode: file_descriptor})
+
+ if output_mode == "html":
+ filename = (
+ f"{output_directory}/{output_filename}{html_file_suffix}"
+ )
+ file_descriptor = initialize_file_descriptor(
+ filename, output_mode, audit_info
+ )
+ file_descriptors.update({output_mode: file_descriptor})
+
+ if output_mode == "ens_rd2022_aws":
+ filename = f"{output_directory}/{output_filename}_ens_rd2022_aws{csv_file_suffix}"
+ file_descriptor = initialize_file_descriptor(
+ filename,
+ output_mode,
+ audit_info,
+ Check_Output_CSV_ENS_RD2022,
+ )
+ file_descriptors.update({output_mode: file_descriptor})
+
+ if output_mode == "cis_1.5_aws":
+ filename = f"{output_directory}/{output_filename}_cis_1.5_aws{csv_file_suffix}"
+ file_descriptor = initialize_file_descriptor(
+ filename, output_mode, audit_info, Check_Output_CSV_CIS
+ )
+ file_descriptors.update({output_mode: file_descriptor})
+
+ if output_mode == "cis_1.4_aws":
+ filename = f"{output_directory}/{output_filename}_cis_1.4_aws{csv_file_suffix}"
+ file_descriptor = initialize_file_descriptor(
+ filename, output_mode, audit_info, Check_Output_CSV_CIS
+ )
+ file_descriptors.update({output_mode: file_descriptor})
+ except Exception as error:
+ logger.error(
+ f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+ )
+
+ return file_descriptors
diff --git a/prowler/lib/outputs/html.py b/prowler/lib/outputs/html.py
new file mode 100644
index 00000000..9135766b
--- /dev/null
+++ b/prowler/lib/outputs/html.py
@@ -0,0 +1,361 @@
+import sys
+
+from prowler.config.config import (
+ html_file_suffix,
+ html_logo_img,
+ html_logo_url,
+ prowler_version,
+ timestamp,
+)
+from prowler.lib.logger import logger
+from prowler.lib.utils.utils import open_file
+
+
+def add_html_header(file_descriptor, audit_info):
+ try:
+ if not audit_info.profile:
+ audit_info.profile = "ENV"
+ if isinstance(audit_info.audited_regions, list):
+ audited_regions = " ".join(audit_info.audited_regions)
+ elif not audit_info.audited_regions:
+ audited_regions = "All Regions"
+ else:
+ audited_regions = audit_info.audited_regions
+ file_descriptor.write(
+ """
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Prowler - The Handy Cloud Security Tool
+
+
+
+
+
+
![prowler-logo]()
+
+
+
+ -
+
+
+ Version: """
+ + prowler_version
+ + """
+
+
+
+ -
+ Parameters used: """
+ + " ".join(sys.argv[1:])
+ + """
+
+ -
+ Date: """
+ + timestamp.isoformat()
+ + """
+
+
+
+
+
+
+
+
+ -
+ AWS Account: """
+ + audit_info.audited_account
+ + """
+
+ -
+ AWS-CLI Profile: """
+ + audit_info.profile
+ + """
+
+ -
+ Audited Regions: """
+ + audited_regions
+ + """
+
+
+
+
+
+
+
+
+ -
+ User Id: """
+ + audit_info.audited_user_id
+ + """
+
+ -
+ Caller Identity ARN:
+ """
+ + audit_info.audited_identity_arn
+ + """
+
+
+
+
+
+
+
+
+ -
+ Total Findings: TOTAL_FINDINGS
+
+ -
+ Passed: TOTAL_PASS
+
+ -
+ Failed: TOTAL_FAIL
+
+ -
+ Total Resources: TOTAL_RESOURCES
+
+
+
+
+
+
+
+
+
+
+
+ | Status |
+ Severity |
+ Service Name |
+ Region |
+ Check Title |
+ Resource ID |
+ Check Description |
+ Check ID |
+ Status Extended |
+ Risk |
+ Recommendation |
+ Recommendation URL |
+
+
+
+ """
+ )
+ except Exception as error:
+ logger.error(
+ f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+ )
+
+
+def fill_html(file_descriptor, finding):
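+    # Pick the table row class from the finding status (default is the green success style)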
+ row_class = "p-3 mb-2 bg-success-custom"
+ if finding.status == "INFO":
+ row_class = "table-info"
+ elif finding.status == "FAIL":
+ row_class = "table-danger"
+ elif finding.status == "WARNING":
+ row_class = "table-warning"
+ file_descriptor.write(
+ f"""
+
+ | {finding.status} |
+ {finding.check_metadata.Severity} |
+ {finding.check_metadata.ServiceName} |
+ {finding.region} |
+ {finding.check_metadata.CheckTitle} |
+ {finding.resource_id.replace("<", "<").replace(">", ">").replace("_", "_")} |
+ {finding.check_metadata.Description} |
+ {finding.check_metadata.CheckID.replace("_", "_")} |
+ {finding.status_extended.replace("<", "<").replace(">", ">").replace("_", "_")} |
+ {finding.check_metadata.Risk} |
+ {finding.check_metadata.Remediation.Recommendation.Text} |
+ |
+
+ """
+ )
+
+
+def fill_html_overview_statistics(stats, output_filename, output_directory):
+ try:
+ filename = f"{output_directory}/{output_filename}{html_file_suffix}"
+ # Read file
+ with open(filename, "r") as file:
+ filedata = file.read()
+
+ # Replace statistics
+ # TOTAL_FINDINGS
+ filedata = filedata.replace("TOTAL_FINDINGS", str(stats.get("findings_count")))
+ # TOTAL_RESOURCES
+ filedata = filedata.replace(
+ "TOTAL_RESOURCES", str(stats.get("resources_count"))
+ )
+ # TOTAL_PASS
+ filedata = filedata.replace("TOTAL_PASS", str(stats.get("total_pass")))
+ # TOTAL_FAIL
+ filedata = filedata.replace("TOTAL_FAIL", str(stats.get("total_fail")))
+ # Write file
+ with open(filename, "w") as file:
+ file.write(filedata)
+
+ except Exception as error:
+ logger.critical(
+ f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
+ )
+ sys.exit()
+
+
+def add_html_footer(output_filename, output_directory):
+ try:
+ filename = f"{output_directory}/{output_filename}{html_file_suffix}"
+ file_descriptor = open_file(
+ filename,
+ "a",
+ )
+ file_descriptor.write(
+ """
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+"""
+ )
+ file_descriptor.close()
+ except Exception as error:
+ logger.critical(
+ f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
+ )
+ sys.exit()
diff --git a/prowler/lib/outputs/json.py b/prowler/lib/outputs/json.py
new file mode 100644
index 00000000..a40c5dc0
--- /dev/null
+++ b/prowler/lib/outputs/json.py
@@ -0,0 +1,77 @@
+import os
+import sys
+
+from prowler.config.config import (
+ json_asff_file_suffix,
+ json_file_suffix,
+ prowler_version,
+ timestamp_utc,
+)
+from prowler.lib.logger import logger
+from prowler.lib.outputs.models import Compliance, ProductFields, Resource, Severity
+from prowler.lib.utils.utils import hash_sha512, open_file
+
+
+def fill_json_asff(finding_output, audit_info, finding):
+    # Use a placeholder resource id when the finding has no associated resource
+ if finding.resource_id == "":
+ finding.resource_id = "NONE_PROVIDED"
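+    # Build a finding Id unique per check/account/region/resource, hashing the resource id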
+ finding_output.Id = f"prowler-{finding.check_metadata.CheckID}-{audit_info.audited_account}-{finding.region}-{hash_sha512(finding.resource_id)}"
+ finding_output.ProductArn = f"arn:{audit_info.audited_partition}:securityhub:{finding.region}::product/prowler/prowler"
+ finding_output.ProductFields = ProductFields(
+ ProviderVersion=prowler_version, ProwlerResourceName=finding.resource_id
+ )
+ finding_output.GeneratorId = "prowler-" + finding.check_metadata.CheckID
+ finding_output.AwsAccountId = audit_info.audited_account
+ finding_output.Types = finding.check_metadata.CheckType
+ finding_output.FirstObservedAt = (
+ finding_output.UpdatedAt
+ ) = finding_output.CreatedAt = timestamp_utc.strftime("%Y-%m-%dT%H:%M:%SZ")
+ finding_output.Severity = Severity(Label=finding.check_metadata.Severity.upper())
+ finding_output.Title = finding.check_metadata.CheckTitle
+ finding_output.Description = finding.check_metadata.Description
+ finding_output.Resources = [
+ Resource(
+ Id=finding.resource_id,
+ Type=finding.check_metadata.ResourceType,
+ Partition=audit_info.audited_partition,
+ Region=finding.region,
+ )
+ ]
+    # Split compound check types on "/" so each RelatedRequirements entry stays within the ASFF 64-character limit
+ check_types = []
+ for type in finding.check_metadata.CheckType:
+ check_types.extend(type.split("/"))
+    # Append "ED" to map PASS/FAIL to the ASFF compliance statuses PASSED/FAILED
+ finding_output.Compliance = Compliance(
+ Status=finding.status + "ED",
+ RelatedRequirements=check_types,
+ )
+ finding_output.Remediation = {
+ "Recommendation": finding.check_metadata.Remediation.Recommendation
+ }
+
+ return finding_output
+
+
+def close_json(output_filename, output_directory, mode):
+ try:
+ suffix = json_file_suffix
+ if mode == "json-asff":
+ suffix = json_asff_file_suffix
+ filename = f"{output_directory}/{output_filename}{suffix}"
+ file_descriptor = open_file(
+ filename,
+ "a",
+ )
+        # Replace the trailing comma with the closing square bracket if the file is not empty
+ if file_descriptor.tell() > 0:
+ file_descriptor.seek(file_descriptor.tell() - 1, os.SEEK_SET)
+ file_descriptor.truncate()
+ file_descriptor.write("]")
+ file_descriptor.close()
+ except Exception as error:
+ logger.critical(
+ f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
+ )
+ sys.exit()
diff --git a/prowler/lib/outputs/models.py b/prowler/lib/outputs/models.py
index b9425578..60776236 100644
--- a/prowler/lib/outputs/models.py
+++ b/prowler/lib/outputs/models.py
@@ -29,6 +29,9 @@ def generate_provider_output_csv(provider: str, finding, audit_info, mode: str,
data["resource_name"] = finding.resource_name
data["subscription"] = finding.subscription
data["tenant_domain"] = audit_info.identity.domain
+ data[
+ "finding_unique_id"
+ ] = f"prowler-{provider}-{finding.check_metadata.CheckID}-{finding.subscription}-{finding.resource_id}"
finding_output = output_model(**data)
if provider == "aws":
@@ -37,6 +40,9 @@ def generate_provider_output_csv(provider: str, finding, audit_info, mode: str,
data["region"] = finding.region
data["resource_id"] = finding.resource_id
data["resource_arn"] = finding.resource_arn
+ data[
+ "finding_unique_id"
+ ] = f"prowler-{provider}-{finding.check_metadata.CheckID}-{audit_info.audited_account}-{finding.region}-{finding.resource_id}"
finding_output = output_model(**data)
if audit_info.organizations_metadata:
@@ -221,6 +227,7 @@ def generate_provider_output_json(provider: str, finding, audit_info, mode: str,
finding_output.Subscription = finding.subscription
finding_output.ResourceId = finding.resource_id
finding_output.ResourceName = finding.resource_name
+ finding_output.FindingUniqueId = f"prowler-{provider}-{finding.check_metadata.CheckID}-{finding.subscription}-{finding.resource_id}"
if provider == "aws":
finding_output.Profile = audit_info.profile
@@ -228,6 +235,7 @@ def generate_provider_output_json(provider: str, finding, audit_info, mode: str,
finding_output.Region = finding.region
finding_output.ResourceId = finding.resource_id
finding_output.ResourceArn = finding.resource_arn
+ finding_output.FindingUniqueId = f"prowler-{provider}-{finding.check_metadata.CheckID}-{audit_info.audited_account}-{finding.region}-{finding.resource_id}"
if audit_info.organizations_metadata:
finding_output.OrganizationsInfo = (
diff --git a/prowler/lib/outputs/outputs.py b/prowler/lib/outputs/outputs.py
index ff2b905c..8628eb55 100644
--- a/prowler/lib/outputs/outputs.py
+++ b/prowler/lib/outputs/outputs.py
@@ -1,46 +1,28 @@
import json
-import os
import sys
-from csv import DictWriter
-from io import TextIOWrapper
-from typing import Any
from colorama import Fore, Style
-from tabulate import tabulate
from prowler.config.config import (
csv_file_suffix,
- html_file_suffix,
- html_logo_img,
- html_logo_url,
json_asff_file_suffix,
json_file_suffix,
orange_color,
- prowler_version,
- timestamp,
- timestamp_utc,
)
from prowler.lib.logger import logger
+from prowler.lib.outputs.compliance import fill_compliance
+from prowler.lib.outputs.file_descriptors import fill_file_descriptors
+from prowler.lib.outputs.html import fill_html
+from prowler.lib.outputs.json import fill_json_asff
from prowler.lib.outputs.models import (
- Aws_Check_Output_CSV,
- Azure_Check_Output_CSV,
- Check_Output_CSV_CIS,
- Check_Output_CSV_ENS_RD2022,
Check_Output_JSON_ASFF,
- Compliance,
- ProductFields,
- Resource,
- Severity,
- generate_csv_fields,
generate_provider_output_csv,
generate_provider_output_json,
)
-from prowler.lib.utils.utils import file_exists, hash_sha512, open_file
from prowler.providers.aws.lib.allowlist.allowlist import is_allowlisted
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
from prowler.providers.aws.lib.security_hub.security_hub import send_to_security_hub
from prowler.providers.azure.lib.audit_info.models import Azure_Audit_Info
-from prowler.providers.common.outputs import Provider_Output_Options
def stdout_report(finding, color, verbose, is_quiet):
@@ -101,137 +83,17 @@ def report(check_findings, output_options, audit_info):
if file_descriptors:
# AWS specific outputs
if finding.check_metadata.Provider == "aws":
- if "ens_rd2022_aws" in output_options.output_modes:
- # We have to retrieve all the check's compliance requirements
- check_compliance = output_options.bulk_checks_metadata[
- finding.check_metadata.CheckID
- ].Compliance
- for compliance in check_compliance:
- if (
- compliance.Framework == "ENS"
- and compliance.Version == "RD2022"
- ):
- for requirement in compliance.Requirements:
- requirement_description = (
- requirement.Description
- )
- requirement_id = requirement.Id
- for attribute in requirement.Attributes:
- compliance_row = Check_Output_CSV_ENS_RD2022(
- Provider=finding.check_metadata.Provider,
- AccountId=audit_info.audited_account,
- Region=finding.region,
- AssessmentDate=timestamp.isoformat(),
- Requirements_Id=requirement_id,
- Requirements_Description=requirement_description,
- Requirements_Attributes_IdGrupoControl=attribute.get(
- "IdGrupoControl"
- ),
- Requirements_Attributes_Marco=attribute.get(
- "Marco"
- ),
- Requirements_Attributes_Categoria=attribute.get(
- "Categoria"
- ),
- Requirements_Attributes_DescripcionControl=attribute.get(
- "DescripcionControl"
- ),
- Requirements_Attributes_Nivel=attribute.get(
- "Nivel"
- ),
- Requirements_Attributes_Tipo=attribute.get(
- "Tipo"
- ),
- Requirements_Attributes_Dimensiones=",".join(
- attribute.get("Dimensiones")
- ),
- Status=finding.status,
- StatusExtended=finding.status_extended,
- ResourceId=finding.resource_id,
- CheckId=finding.check_metadata.CheckID,
- )
-
- csv_header = generate_csv_fields(
- Check_Output_CSV_ENS_RD2022
- )
- csv_writer = DictWriter(
- file_descriptors["ens_rd2022_aws"],
- fieldnames=csv_header,
- delimiter=";",
- )
- csv_writer.writerow(compliance_row.__dict__)
- elif "cis" in str(output_options.output_modes):
- # We have to retrieve all the check's compliance requirements
- check_compliance = output_options.bulk_checks_metadata[
- finding.check_metadata.CheckID
- ].Compliance
- for compliance in check_compliance:
- if compliance.Framework == "CIS-AWS":
- for requirement in compliance.Requirements:
- requirement_description = (
- requirement.Description
- )
- requirement_id = requirement.Id
- for attribute in requirement.Attributes:
- compliance_row = Check_Output_CSV_CIS(
- Provider=finding.check_metadata.Provider,
- AccountId=audit_info.audited_account,
- Region=finding.region,
- AssessmentDate=timestamp.isoformat(),
- Requirements_Id=requirement_id,
- Requirements_Description=requirement_description,
- Requirements_Attributes_Section=attribute.get(
- "Section"
- ),
- Requirements_Attributes_Profile=attribute.get(
- "Profile"
- ),
- Requirements_Attributes_AssessmentStatus=attribute.get(
- "AssessmentStatus"
- ),
- Requirements_Attributes_Description=attribute.get(
- "Description"
- ),
- Requirements_Attributes_RationaleStatement=attribute.get(
- "RationaleStatement"
- ),
- Requirements_Attributes_ImpactStatement=attribute.get(
- "ImpactStatement"
- ),
- Requirements_Attributes_RemediationProcedure=attribute.get(
- "RemediationProcedure"
- ),
- Requirements_Attributes_AuditProcedure=attribute.get(
- "AuditProcedure"
- ),
- Requirements_Attributes_AdditionalInformation=attribute.get(
- "AdditionalInformation"
- ),
- Requirements_Attributes_References=attribute.get(
- "References"
- ),
- Status=finding.status,
- StatusExtended=finding.status_extended,
- ResourceId=finding.resource_id,
- CheckId=finding.check_metadata.CheckID,
- )
-
- csv_header = generate_csv_fields(
- Check_Output_CSV_CIS
- )
- csv_writer = DictWriter(
- file_descriptors[
- output_options.output_modes[-1]
- ],
- fieldnames=csv_header,
- delimiter=";",
- )
- csv_writer.writerow(compliance_row.__dict__)
+ if (
+ "ens_rd2022_aws" in output_options.output_modes
+ or "cis" in str(output_options.output_modes)
+ ):
+ fill_compliance(
+ output_options, finding, audit_info, file_descriptors
+ )
if "html" in file_descriptors:
fill_html(file_descriptors["html"], finding)
-
- file_descriptors["html"].write("")
+ file_descriptors["html"].write("")
if "json-asff" in file_descriptors:
finding_output = Check_Output_JSON_ASFF()
@@ -245,7 +107,10 @@ def report(check_findings, output_options, audit_info):
file_descriptors["json-asff"].write(",")
# Check if it is needed to send findings to security hub
- if output_options.security_hub_enabled:
+ if (
+ output_options.security_hub_enabled
+ and finding.status != "INFO"
+ ):
send_to_security_hub(
finding.region, finding_output, audit_info.audit_session
)
@@ -293,124 +158,6 @@ def report(check_findings, output_options, audit_info):
)
-def initialize_file_descriptor(
- filename: str,
- output_mode: str,
- audit_info: AWS_Audit_Info,
- format: Any = None,
-) -> TextIOWrapper:
- """Open/Create the output file. If needed include headers or the required format"""
- try:
- if file_exists(filename):
- file_descriptor = open_file(
- filename,
- "a",
- )
- else:
- file_descriptor = open_file(
- filename,
- "a",
- )
-
- if output_mode in ("csv", "ens_rd2022_aws", "cis_1.5_aws", "cis_1.4_aws"):
- # Format is the class model of the CSV format to print the headers
- csv_header = [x.upper() for x in generate_csv_fields(format)]
- csv_writer = DictWriter(
- file_descriptor, fieldnames=csv_header, delimiter=";"
- )
- csv_writer.writeheader()
-
- if output_mode in ("json", "json-asff"):
- file_descriptor.write("[")
- if "html" in output_mode:
- add_html_header(file_descriptor, audit_info)
- except Exception as error:
- logger.error(
- f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
- )
-
- return file_descriptor
-
-
-def fill_file_descriptors(output_modes, output_directory, output_filename, audit_info):
- try:
- file_descriptors = {}
- if output_modes:
- for output_mode in output_modes:
- if output_mode == "csv":
- filename = f"{output_directory}/{output_filename}{csv_file_suffix}"
- if isinstance(audit_info, AWS_Audit_Info):
- file_descriptor = initialize_file_descriptor(
- filename,
- output_mode,
- audit_info,
- Aws_Check_Output_CSV,
- )
- if isinstance(audit_info, Azure_Audit_Info):
- file_descriptor = initialize_file_descriptor(
- filename,
- output_mode,
- audit_info,
- Azure_Check_Output_CSV,
- )
- file_descriptors.update({output_mode: file_descriptor})
-
- if output_mode == "json":
- filename = f"{output_directory}/{output_filename}{json_file_suffix}"
- file_descriptor = initialize_file_descriptor(
- filename, output_mode, audit_info
- )
- file_descriptors.update({output_mode: file_descriptor})
-
- if isinstance(audit_info, AWS_Audit_Info):
-
- if output_mode == "json-asff":
- filename = f"{output_directory}/{output_filename}{json_asff_file_suffix}"
- file_descriptor = initialize_file_descriptor(
- filename, output_mode, audit_info
- )
- file_descriptors.update({output_mode: file_descriptor})
-
- if output_mode == "html":
- filename = (
- f"{output_directory}/{output_filename}{html_file_suffix}"
- )
- file_descriptor = initialize_file_descriptor(
- filename, output_mode, audit_info
- )
- file_descriptors.update({output_mode: file_descriptor})
-
- if output_mode == "ens_rd2022_aws":
- filename = f"{output_directory}/{output_filename}_ens_rd2022_aws{csv_file_suffix}"
- file_descriptor = initialize_file_descriptor(
- filename,
- output_mode,
- audit_info,
- Check_Output_CSV_ENS_RD2022,
- )
- file_descriptors.update({output_mode: file_descriptor})
-
- if output_mode == "cis_1.5_aws":
- filename = f"{output_directory}/{output_filename}_cis_1.5_aws{csv_file_suffix}"
- file_descriptor = initialize_file_descriptor(
- filename, output_mode, audit_info, Check_Output_CSV_CIS
- )
- file_descriptors.update({output_mode: file_descriptor})
-
- if output_mode == "cis_1.4_aws":
- filename = f"{output_directory}/{output_filename}_cis_1.4_aws{csv_file_suffix}"
- file_descriptor = initialize_file_descriptor(
- filename, output_mode, audit_info, Check_Output_CSV_CIS
- )
- file_descriptors.update({output_mode: file_descriptor})
- except Exception as error:
- logger.error(
- f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
- )
-
- return file_descriptors
-
-
def set_report_color(status: str) -> str:
"""Return the color for a give result status"""
color = ""
@@ -429,67 +176,6 @@ def set_report_color(status: str) -> str:
return color
-def fill_json_asff(finding_output, audit_info, finding):
- # Check if there are no resources in the finding
- if finding.resource_id == "":
- finding.resource_id = "NONE_PROVIDED"
- finding_output.Id = f"prowler-{finding.check_metadata.CheckID}-{audit_info.audited_account}-{finding.region}-{hash_sha512(finding.resource_id)}"
- finding_output.ProductArn = f"arn:{audit_info.audited_partition}:securityhub:{finding.region}::product/prowler/prowler"
- finding_output.ProductFields = ProductFields(
- ProviderVersion=prowler_version, ProwlerResourceName=finding.resource_id
- )
- finding_output.GeneratorId = "prowler-" + finding.check_metadata.CheckID
- finding_output.AwsAccountId = audit_info.audited_account
- finding_output.Types = finding.check_metadata.CheckType
- finding_output.FirstObservedAt = (
- finding_output.UpdatedAt
- ) = finding_output.CreatedAt = timestamp_utc.strftime("%Y-%m-%dT%H:%M:%SZ")
- finding_output.Severity = Severity(Label=finding.check_metadata.Severity.upper())
- finding_output.Title = finding.check_metadata.CheckTitle
- finding_output.Description = finding.check_metadata.Description
- finding_output.Resources = [
- Resource(
- Id=finding.resource_id,
- Type=finding.check_metadata.ResourceType,
- Partition=audit_info.audited_partition,
- Region=finding.region,
- )
- ]
- # Add ED to PASS or FAIL (PASSED/FAILED)
- finding_output.Compliance = Compliance(
- Status=finding.status + "ED",
- RelatedRequirements=finding.check_metadata.CheckType,
- )
- finding_output.Remediation = {
- "Recommendation": finding.check_metadata.Remediation.Recommendation
- }
-
- return finding_output
-
-
-def close_json(output_filename, output_directory, mode):
- try:
- suffix = json_file_suffix
- if mode == "json-asff":
- suffix = json_asff_file_suffix
- filename = f"{output_directory}/{output_filename}{suffix}"
- file_descriptor = open_file(
- filename,
- "a",
- )
- # Replace last comma for square bracket if not empty
- if file_descriptor.tell() > 0:
- file_descriptor.seek(file_descriptor.tell() - 1, os.SEEK_SET)
- file_descriptor.truncate()
- file_descriptor.write("]")
- file_descriptor.close()
- except Exception as error:
- logger.critical(
- f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
- )
- sys.exit()
-
-
def send_to_s3_bucket(
output_filename, output_directory, output_mode, output_bucket, audit_session
):
@@ -515,578 +201,6 @@ def send_to_s3_bucket(
sys.exit()
-def display_summary_table(
- findings: list,
- audit_info,
- output_options: Provider_Output_Options,
- provider: str,
-):
- output_directory = output_options.output_directory
- output_filename = output_options.output_filename
- try:
- if provider == "aws":
- entity_type = "Account"
- audited_entities = audit_info.audited_account
- elif provider == "azure":
- if audit_info.identity.domain:
- entity_type = "Tenant Domain"
- audited_entities = audit_info.identity.domain
- else:
- entity_type = "Tenant ID/s"
- audited_entities = " ".join(audit_info.identity.tenant_ids)
-
- if findings:
- current = {
- "Service": "",
- "Provider": "",
- "Total": 0,
- "Critical": 0,
- "High": 0,
- "Medium": 0,
- "Low": 0,
- }
- findings_table = {
- "Provider": [],
- "Service": [],
- "Status": [],
- "Critical": [],
- "High": [],
- "Medium": [],
- "Low": [],
- }
- pass_count = fail_count = 0
- for finding in findings:
- # If new service and not first, add previous row
- if (
- current["Service"] != finding.check_metadata.ServiceName
- and current["Service"]
- ):
-
- add_service_to_table(findings_table, current)
-
- current["Total"] = current["Critical"] = current["High"] = current[
- "Medium"
- ] = current["Low"] = 0
-
- current["Service"] = finding.check_metadata.ServiceName
- current["Provider"] = finding.check_metadata.Provider
-
- current["Total"] += 1
- if finding.status == "PASS":
- pass_count += 1
- elif finding.status == "FAIL":
- fail_count += 1
- if finding.check_metadata.Severity == "critical":
- current["Critical"] += 1
- elif finding.check_metadata.Severity == "high":
- current["High"] += 1
- elif finding.check_metadata.Severity == "medium":
- current["Medium"] += 1
- elif finding.check_metadata.Severity == "low":
- current["Low"] += 1
-
- # Add final service
-
- add_service_to_table(findings_table, current)
-
- print("\nOverview Results:")
- overview_table = [
- [
- f"{Fore.RED}{round(fail_count/len(findings)*100, 2)}% ({fail_count}) Failed{Style.RESET_ALL}",
- f"{Fore.GREEN}{round(pass_count/len(findings)*100, 2)}% ({pass_count}) Passed{Style.RESET_ALL}",
- ]
- ]
- print(tabulate(overview_table, tablefmt="rounded_grid"))
-
- print(
- f"\n{entity_type} {Fore.YELLOW}{audited_entities}{Style.RESET_ALL} Scan Results (severity columns are for fails only):"
- )
- if provider == "azure":
- print(
- f"\nSubscriptions scanned: {Fore.YELLOW}{' '.join(audit_info.identity.subscriptions.keys())}{Style.RESET_ALL}"
- )
- print(tabulate(findings_table, headers="keys", tablefmt="rounded_grid"))
- print(
- f"{Style.BRIGHT}* You only see here those services that contains resources.{Style.RESET_ALL}"
- )
- print("\nDetailed results are in:")
- if "html" in output_options.output_modes:
- print(f" - HTML: {output_directory}/{output_filename}.html")
- if "json-asff" in output_options.output_modes:
- print(f" - JSON-ASFF: {output_directory}/{output_filename}.asff.json")
- if "csv" in output_options.output_modes:
- print(f" - CSV: {output_directory}/{output_filename}.csv")
- if "json" in output_options.output_modes:
- print(f" - JSON: {output_directory}/{output_filename}.json")
-
- else:
- print(
- f"\n {Style.BRIGHT}There are no findings in {entity_type} {Fore.YELLOW}{audited_entities}{Style.RESET_ALL}\n"
- )
-
- except Exception as error:
- logger.critical(
- f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
- )
- sys.exit()
-
-
-def add_service_to_table(findings_table, current):
- if (
- current["Critical"] > 0
- or current["High"] > 0
- or current["Medium"] > 0
- or current["Low"] > 0
- ):
- total_fails = (
- current["Critical"] + current["High"] + current["Medium"] + current["Low"]
- )
- current["Status"] = f"{Fore.RED}FAIL ({total_fails}){Style.RESET_ALL}"
- else:
- current["Status"] = f"{Fore.GREEN}PASS ({current['Total']}){Style.RESET_ALL}"
- findings_table["Provider"].append(current["Provider"])
- findings_table["Service"].append(current["Service"])
- findings_table["Status"].append(current["Status"])
- findings_table["Critical"].append(
- f"{Fore.LIGHTRED_EX}{current['Critical']}{Style.RESET_ALL}"
- )
- findings_table["High"].append(f"{Fore.RED}{current['High']}{Style.RESET_ALL}")
- findings_table["Medium"].append(
- f"{Fore.YELLOW}{current['Medium']}{Style.RESET_ALL}"
- )
- findings_table["Low"].append(f"{Fore.BLUE}{current['Low']}{Style.RESET_ALL}")
-
-
-def display_compliance_table(
- findings: list,
- bulk_checks_metadata: dict,
- compliance_framework: str,
- output_filename: str,
- output_directory: str,
-):
- try:
- if "ens_rd2022_aws" in compliance_framework:
- marcos = {}
- ens_compliance_table = {
- "Proveedor": [],
- "Marco/Categoria": [],
- "Estado": [],
- "PYTEC": [],
- "Alto": [],
- "Medio": [],
- "Bajo": [],
- }
- pass_count = fail_count = 0
- for finding in findings:
- check = bulk_checks_metadata[finding.check_metadata.CheckID]
- check_compliances = check.Compliance
- for compliance in check_compliances:
- if (
- compliance.Framework == "ENS"
- and compliance.Provider == "AWS"
- and compliance.Version == "RD2022"
- ):
- compliance_version = compliance.Version
- compliance_fm = compliance.Framework
- compliance_provider = compliance.Provider
- for requirement in compliance.Requirements:
- for attribute in requirement.Attributes:
- marco_categoria = (
- f"{attribute['Marco']}/{attribute['Categoria']}"
- )
- # Check if Marco/Categoria exists
- if marco_categoria not in marcos:
- marcos[marco_categoria] = {
- "Estado": f"{Fore.GREEN}CUMPLE{Style.RESET_ALL}",
- "Pytec": 0,
- "Alto": 0,
- "Medio": 0,
- "Bajo": 0,
- }
- if finding.status == "FAIL":
- fail_count += 1
- marcos[marco_categoria][
- "Estado"
- ] = f"{Fore.RED}NO CUMPLE{Style.RESET_ALL}"
- elif finding.status == "PASS":
- pass_count += 1
- if attribute["Nivel"] == "pytec":
- marcos[marco_categoria]["Pytec"] += 1
- elif attribute["Nivel"] == "alto":
- marcos[marco_categoria]["Alto"] += 1
- elif attribute["Nivel"] == "medio":
- marcos[marco_categoria]["Medio"] += 1
- elif attribute["Nivel"] == "bajo":
- marcos[marco_categoria]["Bajo"] += 1
-
- # Add results to table
- for marco in marcos:
- ens_compliance_table["Proveedor"].append("aws")
- ens_compliance_table["Marco/Categoria"].append(marco)
- ens_compliance_table["Estado"].append(marcos[marco]["Estado"])
- ens_compliance_table["PYTEC"].append(
- f"{Fore.LIGHTRED_EX}{marcos[marco]['Pytec']}{Style.RESET_ALL}"
- )
- ens_compliance_table["Alto"].append(
- f"{Fore.RED}{marcos[marco]['Alto']}{Style.RESET_ALL}"
- )
- ens_compliance_table["Medio"].append(
- f"{Fore.YELLOW}{marcos[marco]['Medio']}{Style.RESET_ALL}"
- )
- ens_compliance_table["Bajo"].append(
- f"{Fore.BLUE}{marcos[marco]['Bajo']}{Style.RESET_ALL}"
- )
- if fail_count + pass_count < 0:
- print(
- f"\n {Style.BRIGHT}There are no resources for {Fore.YELLOW}{compliance_fm} {compliance_version} - {compliance_provider}{Style.RESET_ALL}.\n"
- )
- else:
- print(
- f"\nEstado de Cumplimiento de {Fore.YELLOW}{compliance_fm} {compliance_version} - {compliance_provider}{Style.RESET_ALL}:"
- )
- overview_table = [
- [
- f"{Fore.RED}{round(fail_count/(fail_count+pass_count)*100, 2)}% ({fail_count}) NO CUMPLE{Style.RESET_ALL}",
- f"{Fore.GREEN}{round(pass_count/(fail_count+pass_count)*100, 2)}% ({pass_count}) CUMPLE{Style.RESET_ALL}",
- ]
- ]
- print(tabulate(overview_table, tablefmt="rounded_grid"))
- print(
- f"\nResultados de {Fore.YELLOW}{compliance_fm} {compliance_version} - {compliance_provider}{Style.RESET_ALL}:"
- )
- print(
- tabulate(
- ens_compliance_table, headers="keys", tablefmt="rounded_grid"
- )
- )
- print(
- f"{Style.BRIGHT}* Solo aparece el Marco/Categoria que contiene resultados.{Style.RESET_ALL}"
- )
- print("\nResultados detallados en:")
- print(
- f" - CSV: {output_directory}/{output_filename}_{compliance_framework[0]}.csv\n"
- )
- if "cis" in str(compliance_framework):
- sections = {}
- cis_compliance_table = {
- "Provider": [],
- "Section": [],
- "Level 1": [],
- "Level 2": [],
- }
- pass_count = fail_count = 0
- for finding in findings:
- check = bulk_checks_metadata[finding.check_metadata.CheckID]
- check_compliances = check.Compliance
- for compliance in check_compliances:
- if compliance.Framework == "CIS-AWS" and compliance.Version in str(
- compliance_framework
- ):
- compliance_version = compliance.Version
- compliance_fm = compliance.Framework
- for requirement in compliance.Requirements:
- for attribute in requirement.Attributes:
- section = attribute["Section"]
- # Check if Section exists
- if section not in sections:
- sections[section] = {
- "Status": f"{Fore.GREEN}PASS{Style.RESET_ALL}",
- "Level 1": {"FAIL": 0, "PASS": 0},
- "Level 2": {"FAIL": 0, "PASS": 0},
- }
- if finding.status == "FAIL":
- fail_count += 1
- elif finding.status == "PASS":
- pass_count += 1
- if attribute["Profile"] == "Level 1":
- if finding.status == "FAIL":
- sections[section]["Level 1"]["FAIL"] += 1
- else:
- sections[section]["Level 1"]["PASS"] += 1
- elif attribute["Profile"] == "Level 2":
- if finding.status == "FAIL":
- sections[section]["Level 2"]["FAIL"] += 1
- else:
- sections[section]["Level 2"]["PASS"] += 1
-
- # Add results to table
- sections = dict(sorted(sections.items()))
- for section in sections:
- cis_compliance_table["Provider"].append("aws")
- cis_compliance_table["Section"].append(section)
- if sections[section]["Level 1"]["FAIL"] > 0:
- cis_compliance_table["Level 1"].append(
- f"{Fore.RED}FAIL({sections[section]['Level 1']['FAIL']}){Style.RESET_ALL}"
- )
- else:
- cis_compliance_table["Level 1"].append(
- f"{Fore.GREEN}PASS({sections[section]['Level 1']['PASS']}){Style.RESET_ALL}"
- )
- if sections[section]["Level 2"]["FAIL"] > 0:
- cis_compliance_table["Level 2"].append(
- f"{Fore.RED}FAIL({sections[section]['Level 2']['FAIL']}){Style.RESET_ALL}"
- )
- else:
- cis_compliance_table["Level 2"].append(
- f"{Fore.GREEN}PASS({sections[section]['Level 2']['PASS']}){Style.RESET_ALL}"
- )
- if fail_count + pass_count < 0:
- print(
- f"\n {Style.BRIGHT}There are no resources for {Fore.YELLOW}{compliance_fm}-{compliance_version}{Style.RESET_ALL}.\n"
- )
- else:
- print(
- f"\nCompliance Status of {Fore.YELLOW}{compliance_fm}-{compliance_version}{Style.RESET_ALL} Framework:"
- )
- overview_table = [
- [
- f"{Fore.RED}{round(fail_count/(fail_count+pass_count)*100, 2)}% ({fail_count}) FAIL{Style.RESET_ALL}",
- f"{Fore.GREEN}{round(pass_count/(fail_count+pass_count)*100, 2)}% ({pass_count}) PASS{Style.RESET_ALL}",
- ]
- ]
- print(tabulate(overview_table, tablefmt="rounded_grid"))
- print(
- f"\nFramework {Fore.YELLOW}{compliance_fm}-{compliance_version}{Style.RESET_ALL} Results:"
- )
- print(
- tabulate(
- cis_compliance_table, headers="keys", tablefmt="rounded_grid"
- )
- )
- print(
- f"{Style.BRIGHT}* Only sections containing results appear.{Style.RESET_ALL}"
- )
- print("\nDetailed Results in:")
- print(
- f" - CSV: {output_directory}/{output_filename}_{compliance_framework[0]}.csv\n"
- )
- except Exception as error:
- logger.critical(
- f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
- )
- sys.exit()
-
-
-def add_html_header(file_descriptor, audit_info):
- try:
- if not audit_info.profile:
- audit_info.profile = "ENV"
- if isinstance(audit_info.audited_regions, list):
- audited_regions = " ".join(audit_info.audited_regions)
- elif not audit_info.audited_regions:
- audited_regions = "All Regions"
- else:
- audited_regions = audit_info.audited_regions
- file_descriptor.write(
- """
-
-
-
-
-
-
-
-
-
-
-
-
-
- Prowler - The Handy Cloud Security Tool
-
-
-
-
-
-
![prowler-logo]()
-
-
-
- -
-
-
- Version: """
- + prowler_version
- + """
-
-
-
- -
- Parameters used: """
- + " ".join(sys.argv[1:])
- + """
-
- -
- Date: """
- + timestamp.isoformat()
- + """
-
-
-
-
-
-
-
-
- -
- AWS Account: """
- + audit_info.audited_account
- + """
-
- -
- AWS-CLI Profile: """
- + audit_info.profile
- + """
-
- -
- Audited Regions: """
- + audited_regions
- + """
-
-
-
-
-
-
-
-
- -
- User Id: """
- + audit_info.audited_user_id
- + """
-
- -
- Caller Identity ARN:
- """
- + audit_info.audited_identity_arn
- + """
-
-
-
-
-
-
-
-
- -
- Total Findings: TOTAL_FINDINGS
-
- -
- Passed: TOTAL_PASS
-
- -
- Failed: TOTAL_FAIL
-
- -
- Total Resources: TOTAL_RESOURCES
-
-
-
-
-
-
-
-
-
-
-
- | Status |
- Severity |
- Service Name |
- Region |
- Check Title |
- Resource ID |
- Check Description |
- Check ID |
- Status Extended |
- Risk |
- Recomendation |
- Recomendation URL |
-
-
-
- """
- )
- except Exception as error:
- logger.error(
- f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
- )
-
-
-def fill_html(file_descriptor, finding):
- row_class = "p-3 mb-2 bg-success-custom"
- if finding.status == "INFO":
- row_class = "table-info"
- elif finding.status == "FAIL":
- row_class = "table-danger"
- elif finding.status == "WARNING":
- row_class = "table-warning"
- file_descriptor.write(
- f"""
-
- | {finding.status} |
- {finding.check_metadata.Severity} |
- {finding.check_metadata.ServiceName} |
- {finding.region} |
- {finding.check_metadata.CheckTitle} |
- {finding.resource_id.replace("<", "<").replace(">", ">").replace("_", "_")} |
- {finding.check_metadata.Description} |
- {finding.check_metadata.CheckID.replace("_", "_")} |
- {finding.status_extended} |
- {finding.check_metadata.Risk} |
- {finding.check_metadata.Remediation.Recommendation.Text} |
- |
-
- """
- )
-
-
def extract_findings_statistics(findings: list) -> dict:
stats = {}
total_pass = 0
@@ -1110,133 +224,3 @@ def extract_findings_statistics(findings: list) -> dict:
stats["findings_count"] = findings_count
return stats
-
-
-def fill_html_overview_statistics(stats, output_filename, output_directory):
- try:
- filename = f"{output_directory}/{output_filename}{html_file_suffix}"
- # Read file
- with open(filename, "r") as file:
- filedata = file.read()
-
- # Replace statistics
- # TOTAL_FINDINGS
- filedata = filedata.replace("TOTAL_FINDINGS", str(stats.get("findings_count")))
- # TOTAL_RESOURCES
- filedata = filedata.replace(
- "TOTAL_RESOURCES", str(stats.get("resources_count"))
- )
- # TOTAL_PASS
- filedata = filedata.replace("TOTAL_PASS", str(stats.get("total_pass")))
- # TOTAL_FAIL
- filedata = filedata.replace("TOTAL_FAIL", str(stats.get("total_fail")))
- # Write file
- with open(filename, "w") as file:
- file.write(filedata)
-
- except Exception as error:
- logger.critical(
- f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
- )
- sys.exit()
-
-
-def add_html_footer(output_filename, output_directory):
- try:
- filename = f"{output_directory}/{output_filename}{html_file_suffix}"
- file_descriptor = open_file(
- filename,
- "a",
- )
- file_descriptor.write(
- """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-"""
- )
- file_descriptor.close()
- except Exception as error:
- logger.critical(
- f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
- )
- sys.exit()
diff --git a/prowler/lib/outputs/summary_table.py b/prowler/lib/outputs/summary_table.py
new file mode 100644
index 00000000..7a93b1b9
--- /dev/null
+++ b/prowler/lib/outputs/summary_table.py
@@ -0,0 +1,149 @@
+import sys
+
+from colorama import Fore, Style
+from tabulate import tabulate
+
+from prowler.lib.logger import logger
+from prowler.providers.common.outputs import Provider_Output_Options
+
+
+def display_summary_table(
+ findings: list,
+ audit_info,
+ output_options: Provider_Output_Options,
+ provider: str,
+):
+ output_directory = output_options.output_directory
+ output_filename = output_options.output_filename
+ try:
+ if provider == "aws":
+ entity_type = "Account"
+ audited_entities = audit_info.audited_account
+ elif provider == "azure":
+ if audit_info.identity.domain:
+ entity_type = "Tenant Domain"
+ audited_entities = audit_info.identity.domain
+ else:
+ entity_type = "Tenant ID/s"
+ audited_entities = " ".join(audit_info.identity.tenant_ids)
+
+ if findings:
+ current = {
+ "Service": "",
+ "Provider": "",
+ "Total": 0,
+ "Critical": 0,
+ "High": 0,
+ "Medium": 0,
+ "Low": 0,
+ }
+ findings_table = {
+ "Provider": [],
+ "Service": [],
+ "Status": [],
+ "Critical": [],
+ "High": [],
+ "Medium": [],
+ "Low": [],
+ }
+ pass_count = fail_count = 0
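+            # Findings are assumed to arrive grouped by service, so the running
+            # totals are flushed into the table whenever the service name changes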
+ for finding in findings:
+ # If new service and not first, add previous row
+ if (
+ current["Service"] != finding.check_metadata.ServiceName
+ and current["Service"]
+ ):
+
+ add_service_to_table(findings_table, current)
+
+ current["Total"] = current["Critical"] = current["High"] = current[
+ "Medium"
+ ] = current["Low"] = 0
+
+ current["Service"] = finding.check_metadata.ServiceName
+ current["Provider"] = finding.check_metadata.Provider
+
+ current["Total"] += 1
+ if finding.status == "PASS":
+ pass_count += 1
+ elif finding.status == "FAIL":
+ fail_count += 1
+ if finding.check_metadata.Severity == "critical":
+ current["Critical"] += 1
+ elif finding.check_metadata.Severity == "high":
+ current["High"] += 1
+ elif finding.check_metadata.Severity == "medium":
+ current["Medium"] += 1
+ elif finding.check_metadata.Severity == "low":
+ current["Low"] += 1
+
+ # Add final service
+
+ add_service_to_table(findings_table, current)
+
+ print("\nOverview Results:")
+ overview_table = [
+ [
+ f"{Fore.RED}{round(fail_count/len(findings)*100, 2)}% ({fail_count}) Failed{Style.RESET_ALL}",
+ f"{Fore.GREEN}{round(pass_count/len(findings)*100, 2)}% ({pass_count}) Passed{Style.RESET_ALL}",
+ ]
+ ]
+ print(tabulate(overview_table, tablefmt="rounded_grid"))
+
+ print(
+ f"\n{entity_type} {Fore.YELLOW}{audited_entities}{Style.RESET_ALL} Scan Results (severity columns are for fails only):"
+ )
+ if provider == "azure":
+ print(
+ f"\nSubscriptions scanned: {Fore.YELLOW}{' '.join(audit_info.identity.subscriptions.keys())}{Style.RESET_ALL}"
+ )
+ print(tabulate(findings_table, headers="keys", tablefmt="rounded_grid"))
+ print(
+ f"{Style.BRIGHT}* You only see here those services that contains resources.{Style.RESET_ALL}"
+ )
+ print("\nDetailed results are in:")
+ if "html" in output_options.output_modes:
+ print(f" - HTML: {output_directory}/{output_filename}.html")
+ if "json-asff" in output_options.output_modes:
+ print(f" - JSON-ASFF: {output_directory}/{output_filename}.asff.json")
+ if "csv" in output_options.output_modes:
+ print(f" - CSV: {output_directory}/{output_filename}.csv")
+ if "json" in output_options.output_modes:
+ print(f" - JSON: {output_directory}/{output_filename}.json")
+
+ else:
+ print(
+ f"\n {Style.BRIGHT}There are no findings in {entity_type} {Fore.YELLOW}{audited_entities}{Style.RESET_ALL}\n"
+ )
+
+ except Exception as error:
+ logger.critical(
+ f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
+ )
+ sys.exit()
+
+
+def add_service_to_table(findings_table, current):
+ if (
+ current["Critical"] > 0
+ or current["High"] > 0
+ or current["Medium"] > 0
+ or current["Low"] > 0
+ ):
+ total_fails = (
+ current["Critical"] + current["High"] + current["Medium"] + current["Low"]
+ )
+ current["Status"] = f"{Fore.RED}FAIL ({total_fails}){Style.RESET_ALL}"
+ else:
+ current["Status"] = f"{Fore.GREEN}PASS ({current['Total']}){Style.RESET_ALL}"
+ findings_table["Provider"].append(current["Provider"])
+ findings_table["Service"].append(current["Service"])
+ findings_table["Status"].append(current["Status"])
+ findings_table["Critical"].append(
+ f"{Fore.LIGHTRED_EX}{current['Critical']}{Style.RESET_ALL}"
+ )
+ findings_table["High"].append(f"{Fore.RED}{current['High']}{Style.RESET_ALL}")
+ findings_table["Medium"].append(
+ f"{Fore.YELLOW}{current['Medium']}{Style.RESET_ALL}"
+ )
+ findings_table["Low"].append(f"{Fore.BLUE}{current['Low']}{Style.RESET_ALL}")
diff --git a/prowler/providers/aws/lib/security_hub/security_hub.py b/prowler/providers/aws/lib/security_hub/security_hub.py
index 1514fad8..62d70b74 100644
--- a/prowler/providers/aws/lib/security_hub/security_hub.py
+++ b/prowler/providers/aws/lib/security_hub/security_hub.py
@@ -4,7 +4,11 @@ from operator import itemgetter
from boto3 import session
-from prowler.config.config import json_asff_file_suffix, timestamp_utc
+from prowler.config.config import (
+ json_asff_file_suffix,
+ output_file_timestamp,
+ timestamp_utc,
+)
from prowler.lib.logger import logger
from prowler.lib.outputs.models import Check_Output_JSON_ASFF
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
@@ -48,7 +52,7 @@ def resolve_security_hub_previous_findings(
logger.info("Checking previous findings in Security Hub to archive them.")
# Read current findings from json-asff file
with open(
- f"{output_directory}/prowler-output-{audit_info.audited_account}-{json_asff_file_suffix}"
+ f"{output_directory}/prowler-output-{audit_info.audited_account}-{output_file_timestamp}{json_asff_file_suffix}"
) as f:
json_asff_file = json.load(f)
diff --git a/prowler/providers/aws/services/accessanalyzer/accessanalyzer_service.py b/prowler/providers/aws/services/accessanalyzer/accessanalyzer_service.py
index 5294751c..3a59a12e 100644
--- a/prowler/providers/aws/services/accessanalyzer/accessanalyzer_service.py
+++ b/prowler/providers/aws/services/accessanalyzer/accessanalyzer_service.py
@@ -52,7 +52,7 @@ class AccessAnalyzer:
self.analyzers.append(
Analyzer(
"",
- "",
+ self.audited_account,
"NOT_AVAILABLE",
"",
"",
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.py
index d657ff4c..95ee7aa8 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured.py
@@ -20,7 +20,7 @@ class cloudwatch_changes_to_network_acls_alarm_configured(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.py
index 957e253c..3f6f0ac7 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured.py
@@ -20,7 +20,7 @@ class cloudwatch_changes_to_network_gateways_alarm_configured(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.py
index 0392374a..11dcedf0 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured.py
@@ -20,7 +20,7 @@ class cloudwatch_changes_to_network_route_tables_alarm_configured(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.py
index bc384e8e..a6c678ad 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured.py
@@ -20,7 +20,7 @@ class cloudwatch_changes_to_vpcs_alarm_configured(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.py
index 3ca8a341..830a4c93 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled.py
@@ -22,7 +22,7 @@ class cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_change
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.py
index e9925830..f7929978 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled.py
@@ -22,7 +22,7 @@ class cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_change
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.py
index 026c366b..523d5414 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures.py
@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_authentication_failures(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.py
index b0bf2dce..c543ef81 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes.py
@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_aws_organizations_changes(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.py
index 4e2177e4..821041d5 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk.py
@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk(Chec
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.py
index 9a9b5775..5683e6f0 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes.py
@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_for_s3_bucket_policy_changes(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.py
index e7d01596..4ef88481 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes.py
@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_policy_changes(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.py
index 2bad8a44..ec5fae22 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage.py
@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_root_usage(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.py
index 4c5dcf43..177cc548 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes.py
@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_security_group_changes(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.py
index 91fc738b..607374e0 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa.py
@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_sign_in_without_mfa(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.py
index d66c72fd..69a9b77b 100644
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls.py
@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_unauthorized_api_calls(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
- report.resource_id = ""
+ report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:
diff --git a/prowler/providers/aws/services/config/config_service.py b/prowler/providers/aws/services/config/config_service.py
index 3b84c533..d617f54b 100644
--- a/prowler/providers/aws/services/config/config_service.py
+++ b/prowler/providers/aws/services/config/config_service.py
@@ -57,7 +57,7 @@ class Config:
else:
self.recorders.append(
Recorder(
- None,
+ self.audited_account,
None,
None,
regional_client.region,
diff --git a/prowler/providers/aws/services/secretsmanager/secretsmanager_service.py b/prowler/providers/aws/services/secretsmanager/secretsmanager_service.py
index c5de04a9..99354dee 100644
--- a/prowler/providers/aws/services/secretsmanager/secretsmanager_service.py
+++ b/prowler/providers/aws/services/secretsmanager/secretsmanager_service.py
@@ -38,8 +38,11 @@ class SecretsManager:
arn=secret["ARN"],
name=secret["Name"],
region=regional_client.region,
- rotation_enabled=secret["RotationEnabled"],
)
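+ # ListSecrets may omit RotationEnabled, so keep the default and only set it when present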
+ if "RotationEnabled" in secret:
+ self.secrets[secret["Name"]].rotation_enabled = secret[
+ "RotationEnabled"
+ ]
except Exception as error:
logger.error(
@@ -53,4 +56,4 @@ class Secret(BaseModel):
arn: str
name: str
region: str
- rotation_enabled: bool
+ rotation_enabled: bool = False
diff --git a/tests/lib/outputs/outputs_test.py b/tests/lib/outputs/outputs_test.py
index 99dbe073..aadbc106 100644
--- a/tests/lib/outputs/outputs_test.py
+++ b/tests/lib/outputs/outputs_test.py
@@ -16,7 +16,10 @@ from prowler.config.config import (
timestamp_utc,
)
from prowler.lib.check.models import Check_Report, load_check_metadata
+from prowler.lib.outputs.file_descriptors import fill_file_descriptors
+from prowler.lib.outputs.json import fill_json_asff
from prowler.lib.outputs.models import (
+ generate_csv_fields,
Check_Output_CSV,
Check_Output_JSON_ASFF,
Compliance,
@@ -25,9 +28,6 @@ from prowler.lib.outputs.models import (
Severity,
)
from prowler.lib.outputs.outputs import (
- fill_file_descriptors,
- fill_json_asff,
- generate_csv_fields,
send_to_s3_bucket,
set_report_color,
)
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured_test.py
index f51c8302..69f452df 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_acls_alarm_configured/cloudwatch_changes_to_network_acls_alarm_configured_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured_test.py
index 0f0bcdc1..66ca7eeb 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_gateways_alarm_configured/cloudwatch_changes_to_network_gateways_alarm_configured_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured_test.py
index 5a016dda..ed1e028b 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_network_route_tables_alarm_configured/cloudwatch_changes_to_network_route_tables_alarm_configured_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured_test.py
index af7dc1b2..9aa20845 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_changes_to_vpcs_alarm_configured/cloudwatch_changes_to_vpcs_alarm_configured_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled_test.py
index 9bbf7297..4f8e8a8a 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_changes_enabled_test.py
@@ -49,7 +49,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -98,7 +98,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -153,7 +153,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled_test.py
index 8830df8a..aa278ba8 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled/cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_changes_enabled_test.py
@@ -49,7 +49,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -98,7 +98,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -153,7 +153,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures_test.py
index 8c800b1e..120e9cac 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_authentication_failures/cloudwatch_log_metric_filter_authentication_failures_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes_test.py
index de96c7dd..a8d808b3 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_aws_organizations_changes/cloudwatch_log_metric_filter_aws_organizations_changes_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_aws_organizations_changes:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_aws_organizations_changes:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_aws_organizations_changes:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk_test.py
index abf4b5b8..eccbcf01 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk/cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk_test.py
@@ -49,7 +49,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -98,7 +98,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -153,7 +153,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes_test.py
index 7c60372d..020ba39c 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes/cloudwatch_log_metric_filter_for_s3_bucket_policy_changes_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes_test.py
index 3e285152..23cbfb40 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_policy_changes/cloudwatch_log_metric_filter_policy_changes_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage_test.py
index f62f37d9..266676c9 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_root_usage/cloudwatch_log_metric_filter_root_usage_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes_test.py
index f35f193f..eea0cddb 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_security_group_changes/cloudwatch_log_metric_filter_security_group_changes_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa_test.py
index a9866fb3..4b05b39c 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_sign_in_without_mfa/cloudwatch_log_metric_filter_sign_in_without_mfa_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls_test.py
index 95e3459e..27b1e6f2 100644
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_metric_filter_unauthorized_api_calls/cloudwatch_log_metric_filter_unauthorized_api_calls_test.py
@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
- assert result[0].resource_id == ""
+ assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail