diff --git a/config/config.py b/config/config.py
index 71623eec..02a8e57c 100644
--- a/config/config.py
+++ b/config/config.py
@@ -8,6 +8,10 @@ from lib.logger import logger
 timestamp = datetime.today()
 timestamp_utc = datetime.now(timezone.utc).replace(tzinfo=timezone.utc)
 prowler_version = "3.0-beta-21Nov2022"
+html_logo_url = "https://github.com/prowler-cloud/prowler/"
+html_logo_img = (
+    "https://github.com/prowler-cloud/prowler/raw/master/util/html/prowler-logo-new.png"
+)
 
 orange_color = "\033[38;5;208m"
 banner_color = "\033[1;92m"
@@ -25,6 +29,7 @@ timestamp_iso = timestamp.isoformat()
 csv_file_suffix = ".csv"
 json_file_suffix = ".json"
 json_asff_file_suffix = ".asff.json"
+html_file_suffix = ".html"
 
 config_yaml = "providers/aws/config.yaml"
diff --git a/lib/outputs/outputs.py b/lib/outputs/outputs.py
index 860dd2da..b08f7fbf 100644
--- a/lib/outputs/outputs.py
+++ b/lib/outputs/outputs.py
@@ -10,14 +10,19 @@ from tabulate import tabulate
 
 from config.config import (
     csv_file_suffix,
+    html_file_suffix,
+    html_logo_img,
+    html_logo_url,
     json_asff_file_suffix,
     json_file_suffix,
     orange_color,
+    output_file_timestamp,
     prowler_version,
     timestamp,
     timestamp_iso,
     timestamp_utc,
 )
+from lib.check.models import Output_From_Options
 from lib.logger import logger
 from lib.outputs.models import (
     Check_Output_CSV,
@@ -31,223 +36,264 @@ from lib.outputs.models import (
 )
 from lib.utils.utils import file_exists, hash_sha512, open_file
 from providers.aws.lib.allowlist.allowlist import is_allowlisted
+from providers.aws.lib.audit_info.models import AWS_Audit_Info
 from providers.aws.lib.security_hub.security_hub import send_to_security_hub
 
 
 def report(check_findings, output_options, audit_info):
-    # Sort check findings
-    check_findings.sort(key=lambda x: x.region)
+    try:
+        # Sort check findings
+        check_findings.sort(key=lambda x: x.region)
 
-    # Generate the required output files
-    # csv_fields = []
-    file_descriptors = {}
-    if output_options.output_modes:
-        # We have to create the required output files
-        file_descriptors = fill_file_descriptors(
-            output_options.output_modes,
-            output_options.output_directory,
-            output_options.output_filename,
-        )
+        # Generate the required output files
+        # csv_fields = []
+        file_descriptors = {}
+        if output_options.output_modes:
+            # We have to create the required output files
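+            # audit_info is passed along so that the HTML writer can include
+            # the audited account details in the report header.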
+            file_descriptors = fill_file_descriptors(
+                output_options.output_modes,
+                output_options.output_directory,
+                output_options.output_filename,
+                audit_info,
+            )
 
-    if check_findings:
-        for finding in check_findings:
-            # Check if finding is allowlisted
-            if output_options.allowlist_file:
-                if is_allowlisted(
-                    output_options.allowlist_file,
-                    audit_info.audited_account,
-                    finding.check_metadata.CheckID,
-                    finding.region,
-                    finding.resource_id,
-                ):
-                    finding.status = "WARNING"
-            # Print findings by stdout
-            color = set_report_color(finding.status)
-            if output_options.is_quiet and "FAIL" in finding.status:
-                print(
-                    f"\t{color}{finding.status}{Style.RESET_ALL} {finding.region}: {finding.status_extended}"
-                )
-            elif not output_options.is_quiet and output_options.verbose:
-                print(
-                    f"\t{color}{finding.status}{Style.RESET_ALL} {finding.region}: {finding.status_extended}"
-                )
-            if file_descriptors:
-                if finding.check_metadata.Provider == "aws":
-                    if "ens_rd2022_aws" in output_options.output_modes:
-                        # We have to retrieve all the check's compliance requirements
-                        check_compliance = output_options.bulk_checks_metadata[
-                            finding.check_metadata.CheckID
-                        ].Compliance
-                        for compliance in check_compliance:
-                            if (
-                                compliance.Framework == "ENS"
-                                and compliance.Version == "RD2022"
-                            ):
-                                for requirement in compliance.Requirements:
-                                    requirement_description = requirement.Description
-                                    requirement_id = requirement.Id
-                                    for attribute in requirement.Attributes:
-                                        compliance_row = Check_Output_CSV_ENS_RD2022(
-                                            Provider=finding.check_metadata.Provider,
-                                            AccountId=audit_info.audited_account,
-                                            Region=finding.region,
-                                            AssessmentDate=timestamp.isoformat(),
-                                            Requirements_Id=requirement_id,
-                                            Requirements_Description=requirement_description,
-                                            Requirements_Attributes_IdGrupoControl=attribute.get(
-                                                "IdGrupoControl"
-                                            ),
-                                            Requirements_Attributes_Marco=attribute.get(
-                                                "Marco"
-                                            ),
-                                            Requirements_Attributes_Categoria=attribute.get(
-                                                "Categoria"
-                                            ),
-                                            Requirements_Attributes_DescripcionControl=attribute.get(
-                                                "DescripcionControl"
-                                            ),
-                                            Requirements_Attributes_Nivel=attribute.get(
-                                                "Nivel"
-                                            ),
-                                            Requirements_Attributes_Tipo=attribute.get(
-                                                "Tipo"
-                                            ),
-                                            Requirements_Attributes_Dimensiones=",".join(
-                                                attribute.get("Dimensiones")
-                                            ),
-                                            Status=finding.status,
-                                            StatusExtended=finding.status_extended,
-                                            ResourceId=finding.resource_id,
-                                            CheckId=finding.check_metadata.CheckID,
-                                        )
+        if check_findings:
+            for finding in check_findings:
+                # Check if finding is allowlisted
+                if output_options.allowlist_file:
+                    if is_allowlisted(
+                        output_options.allowlist_file,
+                        audit_info.audited_account,
+                        finding.check_metadata.CheckID,
+                        finding.region,
+                        finding.resource_id,
+                    ):
+                        finding.status = "WARNING"
+                # Print findings by stdout
+                color = set_report_color(finding.status)
+                if output_options.is_quiet and "FAIL" in finding.status:
+                    print(
+                        f"\t{color}{finding.status}{Style.RESET_ALL} {finding.region}: {finding.status_extended}"
+                    )
+                elif not output_options.is_quiet and output_options.verbose:
+                    print(
+                        f"\t{color}{finding.status}{Style.RESET_ALL} {finding.region}: {finding.status_extended}"
+                    )
+                if file_descriptors:
+                    if finding.check_metadata.Provider == "aws":
+                        if "ens_rd2022_aws" in output_options.output_modes:
+                            # We have to retrieve all the check's compliance requirements
+                            check_compliance = output_options.bulk_checks_metadata[
+                                finding.check_metadata.CheckID
+                            ].Compliance
+                            for compliance in check_compliance:
+                                if (
+                                    compliance.Framework == "ENS"
+                                    and compliance.Version == "RD2022"
+                                ):
+                                    for requirement in compliance.Requirements:
+                                        requirement_description = (
+                                            requirement.Description
+                                        )
+                                        requirement_id = requirement.Id
+                                        for attribute in requirement.Attributes:
+                                            compliance_row = Check_Output_CSV_ENS_RD2022(
+                                                Provider=finding.check_metadata.Provider,
+                                                AccountId=audit_info.audited_account,
+                                                Region=finding.region,
+                                                AssessmentDate=timestamp.isoformat(),
+                                                Requirements_Id=requirement_id,
+                                                Requirements_Description=requirement_description,
+                                                Requirements_Attributes_IdGrupoControl=attribute.get(
+                                                    "IdGrupoControl"
+                                                ),
+                                                Requirements_Attributes_Marco=attribute.get(
+                                                    "Marco"
+                                                ),
+                                                Requirements_Attributes_Categoria=attribute.get(
+                                                    "Categoria"
+                                                ),
+                                                Requirements_Attributes_DescripcionControl=attribute.get(
+                                                    "DescripcionControl"
+                                                ),
+                                                Requirements_Attributes_Nivel=attribute.get(
+                                                    "Nivel"
+                                                ),
+                                                Requirements_Attributes_Tipo=attribute.get(
+                                                    "Tipo"
+                                                ),
+                                                Requirements_Attributes_Dimensiones=",".join(
+                                                    attribute.get("Dimensiones")
+                                                ),
+                                                Status=finding.status,
+                                                StatusExtended=finding.status_extended,
+                                                ResourceId=finding.resource_id,
+                                                CheckId=finding.check_metadata.CheckID,
+                                            )
 
-                                        csv_header = generate_csv_fields(
-                                            Check_Output_CSV_ENS_RD2022
-                                        )
-                                        csv_writer = DictWriter(
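+                                            # One ENS RD2022 row is written per
+                                            # requirement attribute, with ";"
+                                            # as the CSV delimiter.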
file_descriptors["ens_rd2022_aws"], - fieldnames=csv_header, - delimiter=";", - ) - csv_writer.writerow(compliance_row.__dict__) + csv_header = generate_csv_fields( + Check_Output_CSV_ENS_RD2022 + ) + csv_writer = DictWriter( + file_descriptors["ens_rd2022_aws"], + fieldnames=csv_header, + delimiter=";", + ) + csv_writer.writerow(compliance_row.__dict__) - if "csv" in file_descriptors: - finding_output = Check_Output_CSV( - audit_info.audited_account, - audit_info.profile, - finding, - audit_info.organizations_metadata, - ) - csv_writer = DictWriter( - file_descriptors["csv"], - fieldnames=generate_csv_fields(Check_Output_CSV), - delimiter=";", - ) - csv_writer.writerow(finding_output.__dict__) + if "csv" in file_descriptors: + finding_output = Check_Output_CSV( + audit_info.audited_account, + audit_info.profile, + finding, + audit_info.organizations_metadata, + ) + csv_writer = DictWriter( + file_descriptors["csv"], + fieldnames=generate_csv_fields(Check_Output_CSV), + delimiter=";", + ) + csv_writer.writerow(finding_output.__dict__) - if "json" in file_descriptors: - finding_output = Check_Output_JSON( - **finding.check_metadata.dict() - ) - fill_json(finding_output, audit_info, finding) + if "json" in file_descriptors: + finding_output = Check_Output_JSON( + **finding.check_metadata.dict() + ) + fill_json(finding_output, audit_info, finding) - json.dump( - finding_output.dict(), file_descriptors["json"], indent=4 - ) - file_descriptors["json"].write(",") + json.dump( + finding_output.dict(), + file_descriptors["json"], + indent=4, + ) + file_descriptors["json"].write(",") - if "json-asff" in file_descriptors: - finding_output = Check_Output_JSON_ASFF() - fill_json_asff(finding_output, audit_info, finding) + if "json-asff" in file_descriptors: + finding_output = Check_Output_JSON_ASFF() + fill_json_asff(finding_output, audit_info, finding) - json.dump( - finding_output.dict(), - file_descriptors["json-asff"], - indent=4, - ) - file_descriptors["json-asff"].write(",") + json.dump( + finding_output.dict(), + file_descriptors["json-asff"], + indent=4, + ) + file_descriptors["json-asff"].write(",") - # Check if it is needed to send findings to security hub - if output_options.security_hub_enabled: - send_to_security_hub( - finding.region, finding_output, audit_info.audit_session - ) - else: # No service resources in the whole account - color = set_report_color("INFO") - if not output_options.is_quiet and output_options.verbose: - print(f"\t{color}INFO{Style.RESET_ALL} There are no resources") - # Separator between findings and bar - if output_options.is_quiet or output_options.verbose: - print() - if file_descriptors: - # Close all file descriptors - for file_descriptor in file_descriptors: - file_descriptors.get(file_descriptor).close() + if "html" in file_descriptors: + fill_html(file_descriptors["html"], audit_info, finding) + + file_descriptors["html"].write("") + + # Check if it is needed to send findings to security hub + if output_options.security_hub_enabled: + send_to_security_hub( + finding.region, finding_output, audit_info.audit_session + ) + else: # No service resources in the whole account + color = set_report_color("INFO") + if not output_options.is_quiet and output_options.verbose: + print(f"\t{color}INFO{Style.RESET_ALL} There are no resources") + # Separator between findings and bar + if output_options.is_quiet or output_options.verbose: + print() + if file_descriptors: + # Close all file descriptors + for file_descriptor in file_descriptors: + 
+                        if "html" in file_descriptors:
+                            fill_html(file_descriptors["html"], audit_info, finding)
+
+                            file_descriptors["html"].write("")
+
+                        # Check if it is needed to send findings to security hub
+                        if output_options.security_hub_enabled:
+                            send_to_security_hub(
+                                finding.region, finding_output, audit_info.audit_session
+                            )
+        else:  # No service resources in the whole account
+            color = set_report_color("INFO")
+            if not output_options.is_quiet and output_options.verbose:
+                print(f"\t{color}INFO{Style.RESET_ALL} There are no resources")
+        # Separator between findings and bar
+        if output_options.is_quiet or output_options.verbose:
+            print()
+        if file_descriptors:
+            # Close all file descriptors
+            for file_descriptor in file_descriptors:
+                file_descriptors.get(file_descriptor).close()
+    except Exception as error:
+        logger.error(
+            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+        )
 
 
 def initialize_file_descriptor(
-    filename: str, output_mode: str, format: Any = None
+    filename: str,
+    output_mode: str,
+    audit_info: AWS_Audit_Info,
+    format: Any = None,
 ) -> TextIOWrapper:
     """Open/Create the output file. If needed include headers or the required format"""
-
-    if file_exists(filename):
-        file_descriptor = open_file(
-            filename,
-            "a",
-        )
-    else:
-        file_descriptor = open_file(
-            filename,
-            "a",
-        )
-
-    if output_mode in ("csv", "ens_rd2022_aws"):
-        # Format is the class model of the CSV format to print the headers
-        csv_header = [x.upper() for x in generate_csv_fields(format)]
-        csv_writer = DictWriter(
-            file_descriptor, fieldnames=csv_header, delimiter=";"
-        )
-        csv_writer.writeheader()
-
-    if output_mode in ("json", "json-asff"):
-        file_descriptor = open_file(
-            filename,
-            "a",
-        )
-        file_descriptor.write("[")
+    try:
+        if file_exists(filename):
+            file_descriptor = open_file(
+                filename,
+                "a",
+            )
+        else:
+            file_descriptor = open_file(
+                filename,
+                "a",
+            )
+
+        if output_mode in ("csv", "ens_rd2022_aws"):
+            # Format is the class model of the CSV format to print the headers
+            csv_header = [x.upper() for x in generate_csv_fields(format)]
+            csv_writer = DictWriter(
+                file_descriptor, fieldnames=csv_header, delimiter=";"
+            )
+            csv_writer.writeheader()
+
+        if output_mode in ("json", "json-asff"):
+            file_descriptor.write("[")
+
+        if "html" in output_mode:
+            add_html_header(file_descriptor, audit_info)
+    except Exception as error:
+        logger.error(
+            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+        )
     return file_descriptor
 
 
-def fill_file_descriptors(output_modes, output_directory, output_filename):
-    file_descriptors = {}
-    if output_modes:
-        for output_mode in output_modes:
-            if output_mode == "csv":
-                filename = f"{output_directory}/{output_filename}{csv_file_suffix}"
-                file_descriptor = initialize_file_descriptor(
-                    filename, output_mode, Check_Output_CSV
-                )
-                file_descriptors.update({output_mode: file_descriptor})
+def fill_file_descriptors(output_modes, output_directory, output_filename, audit_info):
+    try:
+        file_descriptors = {}
+        if output_modes:
+            for output_mode in output_modes:
+                if output_mode == "csv":
+                    filename = f"{output_directory}/{output_filename}{csv_file_suffix}"
+                    file_descriptor = initialize_file_descriptor(
+                        filename,
+                        output_mode,
+                        audit_info,
+                        Check_Output_CSV,
+                    )
+                    file_descriptors.update({output_mode: file_descriptor})
 
-            if output_mode == "json":
-                filename = f"{output_directory}/{output_filename}{json_file_suffix}"
-                file_descriptor = initialize_file_descriptor(filename, output_mode)
-                file_descriptors.update({output_mode: file_descriptor})
+                if output_mode == "json":
+                    filename = f"{output_directory}/{output_filename}{json_file_suffix}"
+                    file_descriptor = initialize_file_descriptor(
+                        filename, output_mode, audit_info
+                    )
+                    file_descriptors.update({output_mode: file_descriptor})
 
-            if output_mode == "json-asff":
-                filename = (
-                    f"{output_directory}/{output_filename}{json_asff_file_suffix}"
-                )
-                file_descriptor = initialize_file_descriptor(filename, output_mode)
-                file_descriptors.update({output_mode: file_descriptor})
+                if output_mode == "json-asff":
+                    filename = (
+                        f"{output_directory}/{output_filename}{json_asff_file_suffix}"
+                    )
+                    file_descriptor = initialize_file_descriptor(
+                        filename, output_mode, audit_info
+                    )
+                    file_descriptors.update({output_mode: file_descriptor})
 
-            if output_mode == "ens_rd2022_aws":
-                filename = f"{output_directory}/{output_filename}_ens_rd2022_aws{csv_file_suffix}"
-                file_descriptor = initialize_file_descriptor(
-                    filename, output_mode, Check_Output_CSV_ENS_RD2022
-                )
-                file_descriptors.update({output_mode: file_descriptor})
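+                # "html" needs no CSV format class: add_html_header() writes
+                # the report header when the descriptor is initialized.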
"ens_rd2022_aws": - filename = f"{output_directory}/{output_filename}_ens_rd2022_aws{csv_file_suffix}" - file_descriptor = initialize_file_descriptor( - filename, output_mode, Check_Output_CSV_ENS_RD2022 - ) - file_descriptors.update({output_mode: file_descriptor}) + if output_mode == "html": + filename = f"{output_directory}/{output_filename}{html_file_suffix}" + file_descriptor = initialize_file_descriptor( + filename, output_mode, audit_info + ) + file_descriptors.update({output_mode: file_descriptor}) + + if output_mode == "ens_rd2022_aws": + filename = f"{output_directory}/{output_filename}_ens_rd2022_aws{csv_file_suffix}" + file_descriptor = initialize_file_descriptor( + filename, output_mode, audit_info, Check_Output_CSV_ENS_RD2022 + ) + file_descriptors.update({output_mode: file_descriptor}) + except Exception as error: + logger.error( + f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}" + ) return file_descriptors @@ -333,6 +379,34 @@ def fill_json_asff(finding_output, audit_info, finding): return finding_output +def fill_html(file_descriptor, audit_info, finding): + row_class = "p-3 mb-2 bg-success-custom" + if finding.status == "INFO": + row_class = "table-info" + elif finding.status == "FAIL": + row_class = "table-danger" + elif finding.status == "WARNING": + row_class = "table-warning" + file_descriptor.write( + f""" + + {finding.status} + {finding.check_metadata.Severity} + {audit_info.audited_account} + {finding.region} + {finding.check_metadata.ServiceName} + {finding.check_metadata.CheckID} + {finding.check_metadata.CheckTitle} + {finding.status_extended} +

{finding.check_metadata.Risk}

+

{finding.check_metadata.Remediation.Recommendation.Text}

+ + {finding.resource_id} + + """ + ) + + def close_json(output_filename, output_directory, mode): try: suffix = json_file_suffix @@ -383,11 +457,12 @@ def send_to_s3_bucket( def display_summary_table( findings: list, - audit_info, - output_filename: str, - output_directory: str, + audit_info: AWS_Audit_Info, + output_options: Output_From_Options, provider: str, ): + output_directory = output_options.output_directory + output_filename = output_options.output_filename try: if provider == "aws": entity_type = "Account" @@ -397,6 +472,7 @@ def display_summary_table( current = { "Service": "", "Provider": "", + "Total": 0, "Critical": 0, "High": 0, "Medium": 0, @@ -421,13 +497,14 @@ def display_summary_table( add_service_to_table(findings_table, current) - current["Critical"] = current["High"] = current["Medium"] = current[ - "Low" - ] = 0 + current["Total"] = current["Critical"] = current["High"] = current[ + "Medium" + ] = current["Low"] = 0 current["Service"] = finding.check_metadata.ServiceName current["Provider"] = finding.check_metadata.Provider + current["Total"] += 1 if finding.status == "PASS": pass_count += 1 elif finding.status == "FAIL": @@ -461,8 +538,17 @@ def display_summary_table( f"{Style.BRIGHT}* You only see here those services that contains resources.{Style.RESET_ALL}" ) print("\nDetailed results are in:") + if "html" in output_options.output_modes: + print(f" - HTML: {output_directory}/{output_filename}.html") + if "json-asff" in output_options.output_modes: + print(f" - JSON-ASFF: {output_directory}/{output_filename}.asff.json") print(f" - CSV: {output_directory}/{output_filename}.csv") - print(f" - JSON: {output_directory}/{output_filename}.json\n") + print(f" - JSON: {output_directory}/{output_filename}.json") + + else: + print( + f"\n {Style.BRIGHT}There are no findings in {entity_type} {Fore.YELLOW}{audit_info.audited_account}{Style.RESET_ALL}\n" + ) except Exception as error: logger.critical( @@ -483,7 +569,7 @@ def add_service_to_table(findings_table, current): ) current["Status"] = f"{Fore.RED}FAIL ({total_fails}){Style.RESET_ALL}" else: - current["Status"] = f"{Fore.GREEN}PASS {Style.RESET_ALL}" + current["Status"] = f"{Fore.GREEN}PASS ({current['Total']}){Style.RESET_ALL}" findings_table["Provider"].append(current["Provider"]) findings_table["Service"].append(current["Service"]) findings_table["Status"].append(current["Status"]) @@ -601,3 +687,208 @@ def display_compliance_table( f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}" ) sys.exit() + + +def add_html_header(file_descriptor, audit_info): + try: + if isinstance(audit_info.audited_regions, list): + audited_regions = " ".join(audit_info.audited_regions) + else: + audited_regions = audit_info.audited_regions + file_descriptor.write( + """ + + + + + + + + + + + + + + Prowler - AWS Security Assessments + + + +
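+<!-- Report header: run information, assessment summary and the findings table -->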
+<body>
+    <b>Report Information:</b>
+    <ul>
+        <li>Version: """
+            + prowler_version
+            + """</li>
+        <li>Parameters used: """
+            + " ".join(sys.argv[1:])
+            + """</li>
+        <li>Date: """
+            + output_file_timestamp
+            + """</li>
+        <li><a href='"""
+            + html_logo_url
+            + """'><img src='"""
+            + html_logo_img
+            + """' alt="prowler-logo"></a></li>
+    </ul>
+    <b>Assessment Summary:</b>
+    <ul>
+        <li>AWS Account: """
+            + audit_info.audited_account
+            + """</li>
+        <li>AWS-CLI Profile: """
+            + audit_info.profile
+            + """</li>
+        <li>Audited Regions: """
+            + audited_regions
+            + """</li>
+        <li>User Id: """
+            + audit_info.audited_user_id
+            + """</li>
+        <li>Caller Identity ARN: """
+            + audit_info.audited_identity_arn
+            + """</li>
+    </ul>
+    <table>
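+        <!-- fill_html() appends one <tr> per finding below -->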
+        <thead>
+            <tr>
+                <th>Status</th>
+                <th>Severity</th>
+                <th>Account ID</th>
+                <th>Region</th>
+                <th>Service</th>
+                <th>Check ID</th>
+                <th>Check Title</th>
+                <th>Check Output</th>
+                <th>Risk</th>
+                <th>Remediation</th>
+                <th>Related URL</th>
+                <th>Resource ID</th>
+            </tr>
+        </thead>
+        <tbody>
+    """
+        )
+    except Exception as error:
+        logger.error(
+            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
+        )
+
+
+def add_html_footer(output_filename, output_directory):
+    try:
+        filename = f"{output_directory}/{output_filename}{html_file_suffix}"
+        file_descriptor = open_file(
+            filename,
+            "a",
+        )
+        file_descriptor.write(
+            """
+        </tbody>
+    </table>
+</body>
+</html>
+"""
+        )
+        file_descriptor.close()
+    except Exception as error:
+        logger.critical(
+            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
+        )
+        sys.exit()
diff --git a/lib/outputs/outputs_test.py b/lib/outputs/outputs_test.py
index d9b203f0..c36f91e0 100644
--- a/lib/outputs/outputs_test.py
+++ b/lib/outputs/outputs_test.py
@@ -42,7 +42,20 @@ class Test_Outputs:
     def test_fill_file_descriptors(self):
         audited_account = "123456789012"
         output_directory = f"{os.path.dirname(os.path.realpath(__file__))}"
-        generate_csv_fields(Check_Output_CSV)
+        audit_info = AWS_Audit_Info(
+            original_session=None,
+            audit_session=None,
+            audited_account="123456789012",
+            audited_identity_arn="test-arn",
+            audited_user_id="test",
+            audited_partition="aws",
+            profile="default",
+            profile_region="eu-west-1",
+            credentials=None,
+            assumed_role_info=None,
+            audited_regions=["eu-west-2", "eu-west-1"],
+            organizations_metadata=None,
+        )
         test_output_modes = [
             ["csv"],
             ["json"],
@@ -101,6 +114,7 @@ class Test_Outputs:
                 output_mode_list,
                 output_directory,
                 output_filename,
+                audit_info,
             )
             for output_mode in output_mode_list:
                 assert (
diff --git a/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.py b/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.py
index ef6e73bd..3b483c2e 100644
--- a/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.py
+++ b/providers/aws/services/cloudformation/cloudformation_outputs_find_secrets/cloudformation_outputs_find_secrets.py
@@ -21,7 +21,8 @@ class cloudformation_outputs_find_secrets(Check):
             report.region = stack.region
             report.resource_id = stack.name
             report.resource_arn = stack.arn
-
+            report.status = "PASS"
+            report.status_extended = f"No secrets found in Stack {stack.name} Outputs."
             if stack.outputs:
                 temp_output_file = tempfile.NamedTemporaryFile(delete=False)
@@ -41,11 +42,6 @@ class cloudformation_outputs_find_secrets(Check):
                     report.status_extended = (
                         f"Potential secret found in Stack {stack.name} Outputs."
                     )
-                else:
-                    report.status = "PASS"
-                    report.status_extended = (
-                        f"No secrets found in Stack {stack.name} Outputs."
-                    )
 
                 os.remove(temp_output_file.name)
             else:
diff --git a/prowler b/prowler
index 1b32a651..c94fd985 100755
--- a/prowler
+++ b/prowler
@@ -31,6 +31,7 @@ from lib.check.checks_loader import load_checks_to_execute
 from lib.check.compliance import update_checks_metadata_with_compliance
 from lib.logger import logger, set_logging_config
 from lib.outputs.outputs import (
+    add_html_footer,
     close_json,
     display_compliance_table,
     display_summary_table,
@@ -167,7 +168,7 @@ if __name__ == "__main__":
         nargs="+",
         help="Output mode, by default csv",
         default=["csv", "json"],
-        choices=["csv", "json", "json-asff"],
+        choices=["csv", "json", "json-asff", "html"],
     )
     parser.add_argument(
         "-F",
@@ -404,6 +405,8 @@ if __name__ == "__main__":
             # Close json file if exists
             if mode == "json" or mode == "json-asff":
                 close_json(output_filename, output_directory, mode)
+            if mode == "html":
+                add_html_footer(output_filename, output_directory)
         # Send output to S3 if needed (-B / -D)
         if args.output_bucket or args.output_bucket_no_assume:
             output_bucket = args.output_bucket
@@ -424,18 +427,19 @@ if __name__ == "__main__":
     if args.security_hub:
         resolve_security_hub_previous_findings(output_directory, audit_info)
 
-    if findings:
-        # Display summary table
-        display_summary_table(
-            findings, audit_info, output_filename, output_directory, provider
-        )
+    # Display summary table
+    display_summary_table(
+        findings,
+        audit_info,
+        audit_output_options,
+        provider,
+    )
 
-        if compliance_framework:
-            # Display compliance table
-            display_compliance_table(
-                findings,
-                bulk_checks_metadata,
-                compliance_framework,
-                output_filename,
-                output_directory,
-            )
+    if compliance_framework and findings:
+        # Display compliance table
+        display_compliance_table(
+            findings,
+            bulk_checks_metadata,
+            compliance_framework,
+            audit_output_options,
+        )