fix: Refactor Outputs (#1548)

This commit is contained in:
Sergio Garcia
2022-12-20 18:23:30 +01:00
committed by GitHub
parent 9502355d22
commit bab6380d68
44 changed files with 1153 additions and 1106 deletions

View File

@@ -23,14 +23,13 @@ from prowler.lib.check.compliance import update_checks_metadata_with_compliance
from prowler.lib.cli.parser import ProwlerArgumentParser
from prowler.lib.logger import logger, set_logging_config
from prowler.lib.outputs.outputs import (
add_html_footer,
close_json,
display_compliance_table,
display_summary_table,
extract_findings_statistics,
fill_html_overview_statistics,
send_to_s3_bucket,
)
from prowler.lib.outputs.compliance import display_compliance_table
from prowler.lib.outputs.html import add_html_footer, fill_html_overview_statistics
from prowler.lib.outputs.json import close_json
from prowler.lib.outputs.summary_table import display_summary_table
from prowler.providers.aws.lib.allowlist.allowlist import parse_allowlist_file
from prowler.providers.aws.lib.quick_inventory.quick_inventory import quick_inventory
from prowler.providers.aws.lib.security_hub.security_hub import (

View File

View File

@@ -0,0 +1,322 @@
import sys
from csv import DictWriter
from colorama import Fore, Style
from tabulate import tabulate
from prowler.config.config import timestamp
from prowler.lib.logger import logger
from prowler.lib.outputs.models import (
Check_Output_CSV_CIS,
Check_Output_CSV_ENS_RD2022,
generate_csv_fields,
)
def fill_compliance(output_options, finding, audit_info, file_descriptors):
    """Write the finding's compliance requirement rows to the compliance CSV.

    For every compliance framework requested through the output modes (ENS
    RD2022 or CIS) one CSV row is generated per requirement attribute of the
    check the finding belongs to, and every generated row is written to the
    compliance file descriptor.
    """
    # We have to retrieve all the check's compliance requirements
    check_compliance = output_options.bulk_checks_metadata[
        finding.check_metadata.CheckID
    ].Compliance
    csv_header = None
    compliance_rows = []
    for compliance in check_compliance:
        if (
            compliance.Framework == "ENS"
            and compliance.Version == "RD2022"
            and "ens_rd2022_aws" in output_options.output_modes
        ):
            for requirement in compliance.Requirements:
                requirement_description = requirement.Description
                requirement_id = requirement.Id
                for attribute in requirement.Attributes:
                    compliance_rows.append(
                        Check_Output_CSV_ENS_RD2022(
                            Provider=finding.check_metadata.Provider,
                            AccountId=audit_info.audited_account,
                            Region=finding.region,
                            AssessmentDate=timestamp.isoformat(),
                            Requirements_Id=requirement_id,
                            Requirements_Description=requirement_description,
                            Requirements_Attributes_IdGrupoControl=attribute.get(
                                "IdGrupoControl"
                            ),
                            Requirements_Attributes_Marco=attribute.get("Marco"),
                            Requirements_Attributes_Categoria=attribute.get(
                                "Categoria"
                            ),
                            Requirements_Attributes_DescripcionControl=attribute.get(
                                "DescripcionControl"
                            ),
                            Requirements_Attributes_Nivel=attribute.get("Nivel"),
                            Requirements_Attributes_Tipo=attribute.get("Tipo"),
                            Requirements_Attributes_Dimensiones=",".join(
                                attribute.get("Dimensiones")
                            ),
                            Status=finding.status,
                            StatusExtended=finding.status_extended,
                            ResourceId=finding.resource_id,
                            CheckId=finding.check_metadata.CheckID,
                        )
                    )
            csv_header = generate_csv_fields(Check_Output_CSV_ENS_RD2022)
        elif compliance.Framework == "CIS-AWS" and "cis" in str(
            output_options.output_modes
        ):
            for requirement in compliance.Requirements:
                requirement_description = requirement.Description
                requirement_id = requirement.Id
                for attribute in requirement.Attributes:
                    compliance_rows.append(
                        Check_Output_CSV_CIS(
                            Provider=finding.check_metadata.Provider,
                            AccountId=audit_info.audited_account,
                            Region=finding.region,
                            AssessmentDate=timestamp.isoformat(),
                            Requirements_Id=requirement_id,
                            Requirements_Description=requirement_description,
                            Requirements_Attributes_Section=attribute.get("Section"),
                            Requirements_Attributes_Profile=attribute.get("Profile"),
                            Requirements_Attributes_AssessmentStatus=attribute.get(
                                "AssessmentStatus"
                            ),
                            Requirements_Attributes_Description=attribute.get(
                                "Description"
                            ),
                            Requirements_Attributes_RationaleStatement=attribute.get(
                                "RationaleStatement"
                            ),
                            Requirements_Attributes_ImpactStatement=attribute.get(
                                "ImpactStatement"
                            ),
                            Requirements_Attributes_RemediationProcedure=attribute.get(
                                "RemediationProcedure"
                            ),
                            Requirements_Attributes_AuditProcedure=attribute.get(
                                "AuditProcedure"
                            ),
                            Requirements_Attributes_AdditionalInformation=attribute.get(
                                "AdditionalInformation"
                            ),
                            Requirements_Attributes_References=attribute.get(
                                "References"
                            ),
                            Status=finding.status,
                            StatusExtended=finding.status_extended,
                            ResourceId=finding.resource_id,
                            CheckId=finding.check_metadata.CheckID,
                        )
                    )
            csv_header = generate_csv_fields(Check_Output_CSV_CIS)
    if compliance_rows:
        # NOTE(review): assumes the compliance output mode is the LAST entry of
        # output_options.output_modes -- confirm against the callers.
        csv_writer = DictWriter(
            file_descriptors[output_options.output_modes[-1]],
            fieldnames=csv_header,
            delimiter=";",
        )
        # Write EVERY generated requirement row; the previous implementation
        # built one row per attribute but only wrote the last one.
        for row in compliance_rows:
            csv_writer.writerow(row.__dict__)
def display_compliance_table(
    findings: list,
    bulk_checks_metadata: dict,
    compliance_framework: str,
    output_filename: str,
    output_directory: str,
):
    """Print a per-framework compliance summary to stdout.

    Supports ENS RD2022 and CIS: aggregates the findings by Marco/Categoria
    (ENS) or Section (CIS), prints an overview pass/fail table, the aggregated
    table and the path to the detailed compliance CSV.

    Args:
        findings: list of finding objects produced by the executed checks.
        bulk_checks_metadata: check metadata indexed by CheckID.
        compliance_framework: requested compliance framework mode(s).
        output_filename: base name of the output files.
        output_directory: directory where the output files live.
    """
    try:
        if "ens_rd2022_aws" in compliance_framework:
            marcos = {}
            ens_compliance_table = {
                "Proveedor": [],
                "Marco/Categoria": [],
                "Estado": [],
                "PYTEC": [],
                "Alto": [],
                "Medio": [],
                "Bajo": [],
            }
            pass_count = fail_count = 0
            # Defaults so the "no resources" message is printable even when no
            # finding matched the framework (they were unbound before).
            compliance_fm = "ENS"
            compliance_version = "RD2022"
            compliance_provider = "AWS"
            for finding in findings:
                check = bulk_checks_metadata[finding.check_metadata.CheckID]
                check_compliances = check.Compliance
                for compliance in check_compliances:
                    if (
                        compliance.Framework == "ENS"
                        and compliance.Provider == "AWS"
                        and compliance.Version == "RD2022"
                    ):
                        compliance_version = compliance.Version
                        compliance_fm = compliance.Framework
                        compliance_provider = compliance.Provider
                        for requirement in compliance.Requirements:
                            for attribute in requirement.Attributes:
                                marco_categoria = (
                                    f"{attribute['Marco']}/{attribute['Categoria']}"
                                )
                                # Check if Marco/Categoria exists
                                if marco_categoria not in marcos:
                                    marcos[marco_categoria] = {
                                        "Estado": f"{Fore.GREEN}CUMPLE{Style.RESET_ALL}",
                                        "Pytec": 0,
                                        "Alto": 0,
                                        "Medio": 0,
                                        "Bajo": 0,
                                    }
                                if finding.status == "FAIL":
                                    fail_count += 1
                                    marcos[marco_categoria][
                                        "Estado"
                                    ] = f"{Fore.RED}NO CUMPLE{Style.RESET_ALL}"
                                elif finding.status == "PASS":
                                    pass_count += 1
                                # Count the requirement level (Nivel).
                                if attribute["Nivel"] == "pytec":
                                    marcos[marco_categoria]["Pytec"] += 1
                                elif attribute["Nivel"] == "alto":
                                    marcos[marco_categoria]["Alto"] += 1
                                elif attribute["Nivel"] == "medio":
                                    marcos[marco_categoria]["Medio"] += 1
                                elif attribute["Nivel"] == "bajo":
                                    marcos[marco_categoria]["Bajo"] += 1
            # Add results to table
            for marco in marcos:
                ens_compliance_table["Proveedor"].append("aws")
                ens_compliance_table["Marco/Categoria"].append(marco)
                ens_compliance_table["Estado"].append(marcos[marco]["Estado"])
                ens_compliance_table["PYTEC"].append(
                    f"{Fore.LIGHTRED_EX}{marcos[marco]['Pytec']}{Style.RESET_ALL}"
                )
                ens_compliance_table["Alto"].append(
                    f"{Fore.RED}{marcos[marco]['Alto']}{Style.RESET_ALL}"
                )
                ens_compliance_table["Medio"].append(
                    f"{Fore.YELLOW}{marcos[marco]['Medio']}{Style.RESET_ALL}"
                )
                ens_compliance_table["Bajo"].append(
                    f"{Fore.BLUE}{marcos[marco]['Bajo']}{Style.RESET_ALL}"
                )
            # Counters are never negative: zero means nothing matched.  The old
            # "< 0" test made this branch unreachable and caused a division by
            # zero below when there were no resources.
            if fail_count + pass_count == 0:
                print(
                    f"\n {Style.BRIGHT}There are no resources for {Fore.YELLOW}{compliance_fm} {compliance_version} - {compliance_provider}{Style.RESET_ALL}.\n"
                )
            else:
                print(
                    f"\nEstado de Cumplimiento de {Fore.YELLOW}{compliance_fm} {compliance_version} - {compliance_provider}{Style.RESET_ALL}:"
                )
                overview_table = [
                    [
                        f"{Fore.RED}{round(fail_count/(fail_count+pass_count)*100, 2)}% ({fail_count}) NO CUMPLE{Style.RESET_ALL}",
                        f"{Fore.GREEN}{round(pass_count/(fail_count+pass_count)*100, 2)}% ({pass_count}) CUMPLE{Style.RESET_ALL}",
                    ]
                ]
                print(tabulate(overview_table, tablefmt="rounded_grid"))
                print(
                    f"\nResultados de {Fore.YELLOW}{compliance_fm} {compliance_version} - {compliance_provider}{Style.RESET_ALL}:"
                )
                print(
                    tabulate(
                        ens_compliance_table, headers="keys", tablefmt="rounded_grid"
                    )
                )
                print(
                    f"{Style.BRIGHT}* Solo aparece el Marco/Categoria que contiene resultados.{Style.RESET_ALL}"
                )
                print("\nResultados detallados en:")
                print(
                    f" - CSV: {output_directory}/{output_filename}_{compliance_framework[0]}.csv\n"
                )
        if "cis" in str(compliance_framework):
            sections = {}
            cis_compliance_table = {
                "Provider": [],
                "Section": [],
                "Level 1": [],
                "Level 2": [],
            }
            pass_count = fail_count = 0
            # Defaults for the "no resources" message (see ENS branch).
            compliance_fm = "CIS-AWS"
            compliance_version = ""
            for finding in findings:
                check = bulk_checks_metadata[finding.check_metadata.CheckID]
                check_compliances = check.Compliance
                for compliance in check_compliances:
                    if compliance.Framework == "CIS-AWS" and compliance.Version in str(
                        compliance_framework
                    ):
                        compliance_version = compliance.Version
                        compliance_fm = compliance.Framework
                        for requirement in compliance.Requirements:
                            for attribute in requirement.Attributes:
                                section = attribute["Section"]
                                # Check if Section exists
                                if section not in sections:
                                    sections[section] = {
                                        "Status": f"{Fore.GREEN}PASS{Style.RESET_ALL}",
                                        "Level 1": {"FAIL": 0, "PASS": 0},
                                        "Level 2": {"FAIL": 0, "PASS": 0},
                                    }
                                if finding.status == "FAIL":
                                    fail_count += 1
                                elif finding.status == "PASS":
                                    pass_count += 1
                                # Track pass/fail per CIS profile level.
                                if attribute["Profile"] == "Level 1":
                                    if finding.status == "FAIL":
                                        sections[section]["Level 1"]["FAIL"] += 1
                                    else:
                                        sections[section]["Level 1"]["PASS"] += 1
                                elif attribute["Profile"] == "Level 2":
                                    if finding.status == "FAIL":
                                        sections[section]["Level 2"]["FAIL"] += 1
                                    else:
                                        sections[section]["Level 2"]["PASS"] += 1
            # Add results to table
            sections = dict(sorted(sections.items()))
            for section in sections:
                cis_compliance_table["Provider"].append("aws")
                cis_compliance_table["Section"].append(section)
                if sections[section]["Level 1"]["FAIL"] > 0:
                    cis_compliance_table["Level 1"].append(
                        f"{Fore.RED}FAIL({sections[section]['Level 1']['FAIL']}){Style.RESET_ALL}"
                    )
                else:
                    cis_compliance_table["Level 1"].append(
                        f"{Fore.GREEN}PASS({sections[section]['Level 1']['PASS']}){Style.RESET_ALL}"
                    )
                if sections[section]["Level 2"]["FAIL"] > 0:
                    cis_compliance_table["Level 2"].append(
                        f"{Fore.RED}FAIL({sections[section]['Level 2']['FAIL']}){Style.RESET_ALL}"
                    )
                else:
                    cis_compliance_table["Level 2"].append(
                        f"{Fore.GREEN}PASS({sections[section]['Level 2']['PASS']}){Style.RESET_ALL}"
                    )
            # Same fix as the ENS branch: "== 0" instead of the dead "< 0".
            if fail_count + pass_count == 0:
                print(
                    f"\n {Style.BRIGHT}There are no resources for {Fore.YELLOW}{compliance_fm}-{compliance_version}{Style.RESET_ALL}.\n"
                )
            else:
                print(
                    f"\nCompliance Status of {Fore.YELLOW}{compliance_fm}-{compliance_version}{Style.RESET_ALL} Framework:"
                )
                overview_table = [
                    [
                        f"{Fore.RED}{round(fail_count/(fail_count+pass_count)*100, 2)}% ({fail_count}) FAIL{Style.RESET_ALL}",
                        f"{Fore.GREEN}{round(pass_count/(fail_count+pass_count)*100, 2)}% ({pass_count}) PASS{Style.RESET_ALL}",
                    ]
                ]
                print(tabulate(overview_table, tablefmt="rounded_grid"))
                print(
                    f"\nFramework {Fore.YELLOW}{compliance_fm}-{compliance_version}{Style.RESET_ALL} Results:"
                )
                print(
                    tabulate(
                        cis_compliance_table, headers="keys", tablefmt="rounded_grid"
                    )
                )
                print(
                    f"{Style.BRIGHT}* Only sections containing results appear.{Style.RESET_ALL}"
                )
                print("\nDetailed Results in:")
                print(
                    f" - CSV: {output_directory}/{output_filename}_{compliance_framework[0]}.csv\n"
                )
    except Exception as error:
        logger.critical(
            f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
        )
        sys.exit()

View File

@@ -0,0 +1,140 @@
from csv import DictWriter
from io import TextIOWrapper
from typing import Any
from prowler.config.config import (
csv_file_suffix,
html_file_suffix,
json_asff_file_suffix,
json_file_suffix,
)
from prowler.lib.logger import logger
from prowler.lib.outputs.html import add_html_header
from prowler.lib.outputs.models import (
Aws_Check_Output_CSV,
Azure_Check_Output_CSV,
Check_Output_CSV_CIS,
Check_Output_CSV_ENS_RD2022,
generate_csv_fields,
)
from prowler.lib.utils.utils import file_exists, open_file
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
from prowler.providers.azure.lib.audit_info.models import Azure_Audit_Info
def initialize_file_descriptor(
    filename: str,
    output_mode: str,
    audit_info: AWS_Audit_Info,
    format: Any = None,
) -> TextIOWrapper:
    """Open/Create the output file. If needed include headers or the required format.

    Returns the file descriptor opened in append mode, or None when opening
    failed (the error is logged).
    """
    # Initialize so the return below cannot raise UnboundLocalError when
    # open_file itself fails.
    file_descriptor = None
    try:
        # Headers/preambles are only written when the file is being created;
        # an existing file is simply re-opened for appending.  (The previous
        # version duplicated the identical open_file call in both branches.)
        is_new_file = not file_exists(filename)
        file_descriptor = open_file(
            filename,
            "a",
        )
        if is_new_file:
            if output_mode in ("csv", "ens_rd2022_aws", "cis_1.5_aws", "cis_1.4_aws"):
                # Format is the class model of the CSV format to print the headers
                csv_header = [x.upper() for x in generate_csv_fields(format)]
                csv_writer = DictWriter(
                    file_descriptor, fieldnames=csv_header, delimiter=";"
                )
                csv_writer.writeheader()
            if output_mode in ("json", "json-asff"):
                # JSON outputs are built as one big array; open it here and
                # close_json() writes the matching "]".
                file_descriptor.write("[")
            if "html" in output_mode:
                add_html_header(file_descriptor, audit_info)
    except Exception as error:
        logger.error(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )
    return file_descriptor
def fill_file_descriptors(output_modes, output_directory, output_filename, audit_info):
    """Open one output file per requested output mode.

    Returns a dict mapping each output mode ("csv", "json", "json-asff",
    "html", "ens_rd2022_aws", "cis_1.5_aws", "cis_1.4_aws") to its
    already-initialized file descriptor.  The json-asff, html and compliance
    modes are only created for AWS audits; "csv" picks its column model from
    the audit_info type.
    """
    try:
        file_descriptors = {}
        if output_modes:
            for output_mode in output_modes:
                if output_mode == "csv":
                    filename = f"{output_directory}/{output_filename}{csv_file_suffix}"
                    # The CSV column model depends on the cloud provider.
                    if isinstance(audit_info, AWS_Audit_Info):
                        file_descriptor = initialize_file_descriptor(
                            filename,
                            output_mode,
                            audit_info,
                            Aws_Check_Output_CSV,
                        )
                    if isinstance(audit_info, Azure_Audit_Info):
                        file_descriptor = initialize_file_descriptor(
                            filename,
                            output_mode,
                            audit_info,
                            Azure_Check_Output_CSV,
                        )
                    file_descriptors.update({output_mode: file_descriptor})

                if output_mode == "json":
                    filename = f"{output_directory}/{output_filename}{json_file_suffix}"
                    file_descriptor = initialize_file_descriptor(
                        filename, output_mode, audit_info
                    )
                    file_descriptors.update({output_mode: file_descriptor})

                # The following output modes are AWS-only.
                if isinstance(audit_info, AWS_Audit_Info):
                    if output_mode == "json-asff":
                        filename = f"{output_directory}/{output_filename}{json_asff_file_suffix}"
                        file_descriptor = initialize_file_descriptor(
                            filename, output_mode, audit_info
                        )
                        file_descriptors.update({output_mode: file_descriptor})

                    if output_mode == "html":
                        filename = (
                            f"{output_directory}/{output_filename}{html_file_suffix}"
                        )
                        file_descriptor = initialize_file_descriptor(
                            filename, output_mode, audit_info
                        )
                        file_descriptors.update({output_mode: file_descriptor})

                    if output_mode == "ens_rd2022_aws":
                        filename = f"{output_directory}/{output_filename}_ens_rd2022_aws{csv_file_suffix}"
                        file_descriptor = initialize_file_descriptor(
                            filename,
                            output_mode,
                            audit_info,
                            Check_Output_CSV_ENS_RD2022,
                        )
                        file_descriptors.update({output_mode: file_descriptor})

                    # Both CIS versions share the same CSV column model.
                    if output_mode == "cis_1.5_aws":
                        filename = f"{output_directory}/{output_filename}_cis_1.5_aws{csv_file_suffix}"
                        file_descriptor = initialize_file_descriptor(
                            filename, output_mode, audit_info, Check_Output_CSV_CIS
                        )
                        file_descriptors.update({output_mode: file_descriptor})

                    if output_mode == "cis_1.4_aws":
                        filename = f"{output_directory}/{output_filename}_cis_1.4_aws{csv_file_suffix}"
                        file_descriptor = initialize_file_descriptor(
                            filename, output_mode, audit_info, Check_Output_CSV_CIS
                        )
                        file_descriptors.update({output_mode: file_descriptor})
    except Exception as error:
        logger.error(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )
    return file_descriptors

361
prowler/lib/outputs/html.py Normal file
View File

@@ -0,0 +1,361 @@
import sys
from prowler.config.config import (
html_file_suffix,
html_logo_img,
html_logo_url,
prowler_version,
timestamp,
)
from prowler.lib.logger import logger
from prowler.lib.utils.utils import open_file
def add_html_header(file_descriptor, audit_info):
    """Write the fixed HTML report header to *file_descriptor*.

    Emits the page <head> (styles, Bootstrap/DataTables CSS), the report
    information / AWS assessment summary / credentials cards and the opening
    of the findings table.  The TOTAL_* placeholders written here are later
    substituted by fill_html_overview_statistics().
    """
    try:
        # Fall back to "ENV" when credentials came from environment variables.
        if not audit_info.profile:
            audit_info.profile = "ENV"
        # Normalize the audited regions into a printable string.
        if isinstance(audit_info.audited_regions, list):
            audited_regions = " ".join(audit_info.audited_regions)
        elif not audit_info.audited_regions:
            audited_regions = "All Regions"
        else:
            audited_regions = audit_info.audited_regions
        # NOTE(review): "Recomendation" in the table headers below is a typo
        # ("Recommendation") -- cosmetic HTML output only, fix in a follow-up.
        file_descriptor.write(
            """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<!-- Required meta tags -->
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<style>
.read-more {
color: #00f;
}
.bg-success-custom {
background-color: #98dea7 !important;
}
.bg-danger {
background-color: #f28484 !important;
}
</style>
<!-- Bootstrap CSS -->
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.0/css/bootstrap.min.css"
integrity="sha384-9aIt2nRpC12Uk9gS9baDl411NQApFmC26EwAOH8WgZl5MYYxFfc+NcPb1dKGj7Sk" crossorigin="anonymous">
<!-- https://datatables.net/download/index with jQuery, DataTables, Buttons, SearchPanes, and Select //-->
<link rel="stylesheet" type="text/css"
href="https://cdn.datatables.net/v/dt/jqc-1.12.4/dt-1.10.25/b-1.7.1/sp-1.4.0/sl-1.3.3/datatables.min.css" />
<link rel="stylesheet" href="https://pro.fontawesome.com/releases/v5.10.0/css/all.css"
integrity="sha384-AYmEC3Yw5cVb3ZcuHtOA93w35dYTsvhLPVnYs9eStHfGJvOvKxVfELGroGkvsg+p" crossorigin="anonymous" />
<style>
.show-read-more .more-text {
display: none;
}
.dataTable {
font-size: 14px;
}
.container-fluid {
font-size: 14px;
}
.float-left {
float: left !important;
max-width: 100%;
}
</style>
<title>Prowler - The Handy Cloud Security Tool</title>
</head>
<body>
<div class="container-fluid">
<div class="row mt-3">
<div class="col-md-4">
<a href="""
            + html_logo_url
            + """><img class="float-left card-img-left mt-4 mr-4 ml-4"
src="""
            + html_logo_img
            + """
alt="prowler-logo"></a>
<div class="card">
<div class="card-header">
Report Information
</div>
<ul class="list-group list-group-flush">
<li class="list-group-item">
<div class="row">
<div class="col-md-auto">
<b>Version:</b> """
            + prowler_version
            + """
</div>
</div>
</li>
<li class="list-group-item">
<b>Parameters used:</b> """
            + " ".join(sys.argv[1:])
            + """
</li>
<li class="list-group-item">
<b>Date:</b> """
            + timestamp.isoformat()
            + """
</li>
</ul>
</div>
</div>
<div class="col-md-2">
<div class="card">
<div class="card-header">
AWS Assessment Summary
</div>
<ul class="list-group list-group-flush">
<li class="list-group-item">
<b>AWS Account:</b> """
            + audit_info.audited_account
            + """
</li>
<li class="list-group-item">
<b>AWS-CLI Profile:</b> """
            + audit_info.profile
            + """
</li>
<li class="list-group-item">
<b>Audited Regions:</b> """
            + audited_regions
            + """
</li>
</ul>
</div>
</div>
<div class="col-md-4">
<div class="card">
<div class="card-header">
AWS Credentials
</div>
<ul class="list-group list-group-flush">
<li class="list-group-item">
<b>User Id:</b> """
            + audit_info.audited_user_id
            + """
</li>
<li class="list-group-item">
<b>Caller Identity ARN:</b>
"""
            + audit_info.audited_identity_arn
            + """
</li>
</ul>
</div>
</div>
<div class="col-md-2">
<div class="card">
<div class="card-header">
Assessment Overview
</div>
<ul class="list-group list-group-flush">
<li class="list-group-item">
<b>Total Findings:</b> TOTAL_FINDINGS
</li>
<li class="list-group-item">
<b>Passed:</b> TOTAL_PASS
</li>
<li class="list-group-item">
<b>Failed:</b> TOTAL_FAIL
</li>
<li class="list-group-item">
<b>Total Resources:</b> TOTAL_RESOURCES
</li>
</ul>
</div>
</div>
</div>
</div>
<div class="row-mt-3">
<div class="col-md-12">
<table class="table compact stripe row-border ordering" id="findingsTable" data-order='[[ 5, "asc" ]]' data-page-length='100'>
<thead class="thead-light">
<tr>
<th scope="col">Status</th>
<th scope="col">Severity</th>
<th style="width:5%" scope="col">Service Name</th>
<th scope="col">Region</th>
<th style="width:20%" scope="col">Check Title</th>
<th scope="col">Resource ID</th>
<th style="width:15%" scope="col">Check Description</th>
<th scope="col">Check ID</th>
<th scope="col">Status Extended</th>
<th scope="col">Risk</th>
<th scope="col">Recomendation</th>
<th style="width:5%" scope="col">Recomendation URL</th>
</tr>
</thead>
<tbody>
"""
        )
    except Exception as error:
        logger.error(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
        )
def fill_html(file_descriptor, finding):
    """Append one findings-table row for *finding* to the HTML report."""
    # Row colour by finding status; anything else (PASS) keeps the custom
    # green background.
    status_to_class = {
        "INFO": "table-info",
        "FAIL": "table-danger",
        "WARNING": "table-warning",
    }
    row_class = status_to_class.get(finding.status, "p-3 mb-2 bg-success-custom")

    def _escape(value):
        # HTML-escape angle brackets and let the browser break after "_".
        return value.replace("<", "&lt;").replace(">", "&gt;").replace("_", "<wbr>_")

    resource_id = _escape(finding.resource_id)
    status_extended = _escape(finding.status_extended)
    check_id = finding.check_metadata.CheckID.replace("_", "<wbr>_")
    file_descriptor.write(
        f"""
            <tr class="{row_class}">
                <td>{finding.status}</td>
                <td>{finding.check_metadata.Severity}</td>
                <td>{finding.check_metadata.ServiceName}</td>
                <td>{finding.region}</td>
                <td>{finding.check_metadata.CheckTitle}</td>
                <td>{resource_id}</td>
                <td>{finding.check_metadata.Description}</td>
                <td>{check_id}</td>
                <td>{status_extended}</td>
                <td><p class="show-read-more">{finding.check_metadata.Risk}</p></td>
                <td><p class="show-read-more">{finding.check_metadata.Remediation.Recommendation.Text}</p></td>
                <td><a class="read-more" href="{finding.check_metadata.Remediation.Recommendation.Url}"><i class="fas fa-external-link-alt"></i></a></td>
            </tr>
"""
    )
def fill_html_overview_statistics(stats, output_filename, output_directory):
    """Substitute the TOTAL_* placeholders of the HTML report with the real
    statistics from *stats* (rewrites the report file in place)."""
    try:
        report_file = f"{output_directory}/{output_filename}{html_file_suffix}"
        # Placeholder -> value written by add_html_header().
        placeholders = {
            "TOTAL_FINDINGS": stats.get("findings_count"),
            "TOTAL_RESOURCES": stats.get("resources_count"),
            "TOTAL_PASS": stats.get("total_pass"),
            "TOTAL_FAIL": stats.get("total_fail"),
        }
        with open(report_file, "r") as file:
            contents = file.read()
        for placeholder, value in placeholders.items():
            contents = contents.replace(placeholder, str(value))
        with open(report_file, "w") as file:
            file.write(contents)
    except Exception as error:
        logger.critical(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
        )
        sys.exit()
def add_html_footer(output_filename, output_directory):
    """Close the findings table and append the page footer to the HTML report.

    The footer carries the JavaScript that initializes the DataTables grid
    (filters, pagination, search panes) and the read-more/read-less toggles,
    then the file descriptor is closed.  On failure, logs and exits.
    """
    try:
        filename = f"{output_directory}/{output_filename}{html_file_suffix}"
        # The report was written incrementally, so re-open it for appending.
        file_descriptor = open_file(
            filename,
            "a",
        )
        file_descriptor.write(
            """
</tbody>
</table>
</div>
</div>
</div>
</div>
<!-- Table search and paginator -->
<!-- Optional JavaScript -->
<!-- jQuery first, then Popper.js, then Bootstrap JS -->
<script src="https://code.jquery.com/jquery-3.5.1.min.js"
integrity="sha256-9/aliU8dGd2tb6OSsuzixeV4y/faTqgFtohetphbbj0=" crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.5.0/js/bootstrap.bundle.min.js"
integrity="sha384-1CmrxMRARb6aLqgBO7yyAxTOQE2AKb9GfXnEo760AUcUmFx3ibVJJAzGytlQcNXd"
crossorigin="anonymous"></script>
<!-- https://datatables.net/download/index with jQuery, DataTables, Buttons, SearchPanes, and Select //-->
<script type="text/javascript"
src="https://cdn.datatables.net/v/dt/jqc-1.12.4/dt-1.10.25/b-1.7.1/sp-1.4.0/sl-1.3.3/datatables.min.js"></script>
<script>
$(document).ready(function () {
// Initialise the table with 50 rows, and some search/filtering panes
$('#findingsTable').DataTable({
responsive: true,
// Show 25, 50, 100 and All records
lengthChange: true,
lengthMenu: [[25, 50, 100, -1], [25, 50, 100, "All"]],
searchPanes: {
cascadePanes: true,
viewTotal: true,
},
dom: 'Blfrtip',
language: {
// To enable a filter button instead of the filter row
searchPanes: {
clearMessage: 'Clear Filters',
collapse: { 0: 'Filters', _: 'Filters (%d)' },
initCollapsed: true
}
},
buttons: [
{
extend: 'searchPanes',
config: {
cascadePanes: true,
viewTotal: true,
orderable: false
}
}
],
columnDefs: [
{
searchPanes: {
show: true,
pagingType: 'numbers',
searching: true
},
// Show all filters
targets: [0, 1, 2, 3, 5, 7]
}
]
});
var maxLength = 30;
// ReadMore ReadLess
$(".show-read-more").each(function () {
var myStr = $(this).text();
if ($.trim(myStr).length > maxLength) {
var newStr = myStr.substring(0, maxLength);
var removedStr = myStr.substring(maxLength, $.trim(myStr).length);
$(this).empty().html(newStr);
$(this).append(' <a href="javascript:void(0);" class="read-more">read more...</a>');
$(this).append('<span class="more-text">' + removedStr + '</span>');
}
});
$(".read-more").click(function () {
$(this).siblings(".more-text").contents().unwrap();
$(this).remove();
});
});
</script>
</body>
</html>
"""
        )
        file_descriptor.close()
    except Exception as error:
        logger.critical(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
        )
        sys.exit()

View File

@@ -0,0 +1,77 @@
import os
import sys
from prowler.config.config import (
json_asff_file_suffix,
json_file_suffix,
prowler_version,
timestamp_utc,
)
from prowler.lib.logger import logger
from prowler.lib.outputs.models import Compliance, ProductFields, Resource, Severity
from prowler.lib.utils.utils import hash_sha512, open_file
def fill_json_asff(finding_output, audit_info, finding):
    """Populate the ASFF output model *finding_output* from a Prowler finding
    and the audit information, then return it."""
    # ASFF needs a resource id, so findings without one are flagged explicitly.
    if finding.resource_id == "":
        finding.resource_id = "NONE_PROVIDED"

    observed_at = timestamp_utc.strftime("%Y-%m-%dT%H:%M:%SZ")

    finding_output.Id = (
        f"prowler-{finding.check_metadata.CheckID}-{audit_info.audited_account}"
        f"-{finding.region}-{hash_sha512(finding.resource_id)}"
    )
    finding_output.ProductArn = (
        f"arn:{audit_info.audited_partition}:securityhub:{finding.region}"
        "::product/prowler/prowler"
    )
    finding_output.ProductFields = ProductFields(
        ProviderVersion=prowler_version, ProwlerResourceName=finding.resource_id
    )
    finding_output.GeneratorId = f"prowler-{finding.check_metadata.CheckID}"
    finding_output.AwsAccountId = audit_info.audited_account
    finding_output.Types = finding.check_metadata.CheckType
    finding_output.FirstObservedAt = observed_at
    finding_output.UpdatedAt = observed_at
    finding_output.CreatedAt = observed_at
    finding_output.Severity = Severity(Label=finding.check_metadata.Severity.upper())
    finding_output.Title = finding.check_metadata.CheckTitle
    finding_output.Description = finding.check_metadata.Description
    finding_output.Resources = [
        Resource(
            Id=finding.resource_id,
            Type=finding.check_metadata.ResourceType,
            Partition=audit_info.audited_partition,
            Region=finding.region,
        )
    ]
    # Flatten "a/b/c" style check types into individual related requirements.
    related_requirements = []
    for check_type in finding.check_metadata.CheckType:
        related_requirements.extend(check_type.split("/"))
    # ASFF expects PASSED/FAILED, hence the appended "ED".
    finding_output.Compliance = Compliance(
        Status=finding.status + "ED",
        RelatedRequirements=related_requirements,
    )
    finding_output.Remediation = {
        "Recommendation": finding.check_metadata.Remediation.Recommendation
    }
    return finding_output
def close_json(output_filename, output_directory, mode):
    """Terminate the JSON (or JSON-ASFF) output file with its closing "]"."""
    try:
        suffix = json_asff_file_suffix if mode == "json-asff" else json_file_suffix
        filename = f"{output_directory}/{output_filename}{suffix}"
        file_descriptor = open_file(
            filename,
            "a",
        )
        # In append mode tell() is the current file size; a non-empty file ends
        # with a trailing comma that must be swapped for the closing bracket.
        size = file_descriptor.tell()
        if size > 0:
            file_descriptor.seek(size - 1, os.SEEK_SET)
            file_descriptor.truncate()
            file_descriptor.write("]")
        file_descriptor.close()
    except Exception as error:
        logger.critical(
            f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] -- {error}"
        )
        sys.exit()

View File

@@ -29,6 +29,9 @@ def generate_provider_output_csv(provider: str, finding, audit_info, mode: str,
data["resource_name"] = finding.resource_name
data["subscription"] = finding.subscription
data["tenant_domain"] = audit_info.identity.domain
data[
"finding_unique_id"
] = f"prowler-{provider}-{finding.check_metadata.CheckID}-{finding.subscription}-{finding.resource_id}"
finding_output = output_model(**data)
if provider == "aws":
@@ -37,6 +40,9 @@ def generate_provider_output_csv(provider: str, finding, audit_info, mode: str,
data["region"] = finding.region
data["resource_id"] = finding.resource_id
data["resource_arn"] = finding.resource_arn
data[
"finding_unique_id"
] = f"prowler-{provider}-{finding.check_metadata.CheckID}-{audit_info.audited_account}-{finding.region}-{finding.resource_id}"
finding_output = output_model(**data)
if audit_info.organizations_metadata:
@@ -221,6 +227,7 @@ def generate_provider_output_json(provider: str, finding, audit_info, mode: str,
finding_output.Subscription = finding.subscription
finding_output.ResourceId = finding.resource_id
finding_output.ResourceName = finding.resource_name
finding_output.FindingUniqueId = f"prowler-{provider}-{finding.check_metadata.CheckID}-{finding.subscription}-{finding.resource_id}"
if provider == "aws":
finding_output.Profile = audit_info.profile
@@ -228,6 +235,7 @@ def generate_provider_output_json(provider: str, finding, audit_info, mode: str,
finding_output.Region = finding.region
finding_output.ResourceId = finding.resource_id
finding_output.ResourceArn = finding.resource_arn
finding_output.FindingUniqueId = f"prowler-{provider}-{finding.check_metadata.CheckID}-{audit_info.audited_account}-{finding.region}-{finding.resource_id}"
if audit_info.organizations_metadata:
finding_output.OrganizationsInfo = (

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,149 @@
import sys
from colorama import Fore, Style
from tabulate import tabulate
from prowler.lib.logger import logger
from prowler.providers.common.outputs import Provider_Output_Options
def display_summary_table(
    findings: list,
    audit_info,
    output_options: Provider_Output_Options,
    provider: str,
):
    """Print the per-service summary of the scan results to stdout.

    Aggregates the findings into one row per service (severity counters track
    fails only), prints an overview pass/fail table, the per-service table and
    the paths to the generated output files.  Exits on unexpected errors.

    NOTE(review): the accumulation below assumes *findings* is grouped by
    service name (a service change flushes the current row) -- confirm the
    callers keep findings sorted.
    """
    output_directory = output_options.output_directory
    output_filename = output_options.output_filename
    try:
        # Resolve what entity was audited for the chosen provider.
        if provider == "aws":
            entity_type = "Account"
            audited_entities = audit_info.audited_account
        elif provider == "azure":
            if audit_info.identity.domain:
                entity_type = "Tenant Domain"
                audited_entities = audit_info.identity.domain
            else:
                entity_type = "Tenant ID/s"
                audited_entities = " ".join(audit_info.identity.tenant_ids)
        if findings:
            # Accumulator for the service currently being aggregated.
            current = {
                "Service": "",
                "Provider": "",
                "Total": 0,
                "Critical": 0,
                "High": 0,
                "Medium": 0,
                "Low": 0,
            }
            findings_table = {
                "Provider": [],
                "Service": [],
                "Status": [],
                "Critical": [],
                "High": [],
                "Medium": [],
                "Low": [],
            }
            pass_count = fail_count = 0
            for finding in findings:
                # If new service and not first, add previous row
                if (
                    current["Service"] != finding.check_metadata.ServiceName
                    and current["Service"]
                ):
                    add_service_to_table(findings_table, current)
                    # Reset the counters for the next service.
                    current["Total"] = current["Critical"] = current["High"] = current[
                        "Medium"
                    ] = current["Low"] = 0
                current["Service"] = finding.check_metadata.ServiceName
                current["Provider"] = finding.check_metadata.Provider
                current["Total"] += 1
                if finding.status == "PASS":
                    pass_count += 1
                elif finding.status == "FAIL":
                    # Severity counters are only incremented for failures.
                    fail_count += 1
                    if finding.check_metadata.Severity == "critical":
                        current["Critical"] += 1
                    elif finding.check_metadata.Severity == "high":
                        current["High"] += 1
                    elif finding.check_metadata.Severity == "medium":
                        current["Medium"] += 1
                    elif finding.check_metadata.Severity == "low":
                        current["Low"] += 1
            # Add final service
            add_service_to_table(findings_table, current)
            print("\nOverview Results:")
            overview_table = [
                [
                    f"{Fore.RED}{round(fail_count/len(findings)*100, 2)}% ({fail_count}) Failed{Style.RESET_ALL}",
                    f"{Fore.GREEN}{round(pass_count/len(findings)*100, 2)}% ({pass_count}) Passed{Style.RESET_ALL}",
                ]
            ]
            print(tabulate(overview_table, tablefmt="rounded_grid"))
            print(
                f"\n{entity_type} {Fore.YELLOW}{audited_entities}{Style.RESET_ALL} Scan Results (severity columns are for fails only):"
            )
            if provider == "azure":
                print(
                    f"\nSubscriptions scanned: {Fore.YELLOW}{' '.join(audit_info.identity.subscriptions.keys())}{Style.RESET_ALL}"
                )
            print(tabulate(findings_table, headers="keys", tablefmt="rounded_grid"))
            print(
                f"{Style.BRIGHT}* You only see here those services that contains resources.{Style.RESET_ALL}"
            )
            # Point the user to the files actually requested.
            print("\nDetailed results are in:")
            if "html" in output_options.output_modes:
                print(f" - HTML: {output_directory}/{output_filename}.html")
            if "json-asff" in output_options.output_modes:
                print(f" - JSON-ASFF: {output_directory}/{output_filename}.asff.json")
            if "csv" in output_options.output_modes:
                print(f" - CSV: {output_directory}/{output_filename}.csv")
            if "json" in output_options.output_modes:
                print(f" - JSON: {output_directory}/{output_filename}.json")
        else:
            print(
                f"\n {Style.BRIGHT}There are no findings in {entity_type} {Fore.YELLOW}{audited_entities}{Style.RESET_ALL}\n"
            )
    except Exception as error:
        logger.critical(
            f"{error.__class__.__name__}:{error.__traceback__.tb_lineno} -- {error}"
        )
        sys.exit()
def add_service_to_table(findings_table, current):
    """Append one service's aggregated scan results as a row of findings_table.

    A service is marked FAIL when it accumulated any failed finding in any
    severity bucket; otherwise it is marked PASS with its total finding count.
    Mutates both arguments: sets current["Status"] and appends one colorized
    value to each column list of findings_table.
    """
    failed_findings = sum(
        (current["Critical"], current["High"], current["Medium"], current["Low"])
    )
    if failed_findings > 0:
        current["Status"] = f"{Fore.RED}FAIL ({failed_findings}){Style.RESET_ALL}"
    else:
        current["Status"] = f"{Fore.GREEN}PASS ({current['Total']}){Style.RESET_ALL}"

    # Severity counters are colorized per column for terminal rendering.
    row_values = {
        "Provider": current["Provider"],
        "Service": current["Service"],
        "Status": current["Status"],
        "Critical": f"{Fore.LIGHTRED_EX}{current['Critical']}{Style.RESET_ALL}",
        "High": f"{Fore.RED}{current['High']}{Style.RESET_ALL}",
        "Medium": f"{Fore.YELLOW}{current['Medium']}{Style.RESET_ALL}",
        "Low": f"{Fore.BLUE}{current['Low']}{Style.RESET_ALL}",
    }
    for column, value in row_values.items():
        findings_table[column].append(value)

View File

@@ -4,7 +4,11 @@ from operator import itemgetter
from boto3 import session
from prowler.config.config import json_asff_file_suffix, timestamp_utc
from prowler.config.config import (
json_asff_file_suffix,
output_file_timestamp,
timestamp_utc,
)
from prowler.lib.logger import logger
from prowler.lib.outputs.models import Check_Output_JSON_ASFF
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
@@ -48,7 +52,7 @@ def resolve_security_hub_previous_findings(
logger.info("Checking previous findings in Security Hub to archive them.")
# Read current findings from json-asff file
with open(
f"{output_directory}/prowler-output-{audit_info.audited_account}-{json_asff_file_suffix}"
f"{output_directory}/prowler-output-{audit_info.audited_account}-{output_file_timestamp}{json_asff_file_suffix}"
) as f:
json_asff_file = json.load(f)

View File

@@ -52,7 +52,7 @@ class AccessAnalyzer:
self.analyzers.append(
Analyzer(
"",
"",
self.audited_account,
"NOT_AVAILABLE",
"",
"",

View File

@@ -20,7 +20,7 @@ class cloudwatch_changes_to_network_acls_alarm_configured(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_changes_to_network_gateways_alarm_configured(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_changes_to_network_route_tables_alarm_configured(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_changes_to_vpcs_alarm_configured(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -22,7 +22,7 @@ class cloudwatch_log_metric_filter_and_alarm_for_aws_config_configuration_change
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -22,7 +22,7 @@ class cloudwatch_log_metric_filter_and_alarm_for_cloudtrail_configuration_change
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_authentication_failures(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_aws_organizations_changes(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_disable_or_scheduled_deletion_of_kms_cmk(Chec
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_for_s3_bucket_policy_changes(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_policy_changes(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_root_usage(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_security_group_changes(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_sign_in_without_mfa(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -20,7 +20,7 @@ class cloudwatch_log_metric_filter_unauthorized_api_calls(Check):
"No CloudWatch log groups found with metric filters or alarms associated."
)
report.region = "us-east-1"
report.resource_id = ""
report.resource_id = cloudtrail_client.audited_account
# 1. Iterate for CloudWatch Log Group in CloudTrail trails
log_groups = []
for trail in cloudtrail_client.trails:

View File

@@ -57,7 +57,7 @@ class Config:
else:
self.recorders.append(
Recorder(
None,
self.audited_account,
None,
None,
regional_client.region,

View File

@@ -38,8 +38,11 @@ class SecretsManager:
arn=secret["ARN"],
name=secret["Name"],
region=regional_client.region,
rotation_enabled=secret["RotationEnabled"],
)
if "RotationEnabled" in secret:
self.secrets[secret["Name"]].rotation_enabled = secret[
"RotationEnabled"
]
except Exception as error:
logger.error(
@@ -53,4 +56,4 @@ class Secret(BaseModel):
arn: str
name: str
region: str
rotation_enabled: bool
rotation_enabled: bool = False

View File

@@ -16,7 +16,10 @@ from prowler.config.config import (
timestamp_utc,
)
from prowler.lib.check.models import Check_Report, load_check_metadata
from prowler.lib.outputs.file_descriptors import fill_file_descriptors
from prowler.lib.outputs.json import fill_json_asff
from prowler.lib.outputs.models import (
generate_csv_fields,
Check_Output_CSV,
Check_Output_JSON_ASFF,
Compliance,
@@ -25,9 +28,6 @@ from prowler.lib.outputs.models import (
Severity,
)
from prowler.lib.outputs.outputs import (
fill_file_descriptors,
fill_json_asff,
generate_csv_fields,
send_to_s3_bucket,
set_report_color,
)

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -49,7 +49,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -98,7 +98,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -153,7 +153,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -49,7 +49,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -98,7 +98,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -153,7 +153,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_aws_organizations_changes:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_aws_organizations_changes:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_aws_organizations_changes:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -49,7 +49,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -98,7 +98,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -153,7 +153,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail

View File

@@ -47,7 +47,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -94,7 +94,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail
@@ -147,7 +147,7 @@ class Test_cloudwatch_log_metric_filter_unauthorized_api_calls:
result[0].status_extended
== "No CloudWatch log groups found with metric filters or alarms associated."
)
assert result[0].resource_id == ""
assert result[0].resource_id == current_audit_info.audited_account
@mock_logs
@mock_cloudtrail