feat(outputs): Output generation format CSV (#1230)

* chore(csv): first version csv output

* chore(pytest): added pytest dependency

* chore(outputs): organizations demo

* chore(compliance): Added new dataclass for each compliance framework

* fix(test org values): deleted test values in orgs instantiation

* fix(csv): formatted to match output format

* fix(csv output): Reformulation of check report and minor changes

* fix(minor issues): Fix various issues coming from PR comments

* fix(csv): Renamed csv output data model

* fix(output dir): create default if not present

* fix(typo): remove s

* fix(oldcode)

* fix(typo)

* fix(output): Only send to csv when -M is passed

Co-authored-by: sergargar <sergio@verica.io>
Co-authored-by: Pepe Fagoaga <pepe@verica.io>
Author: Nacho Rivera
Date: 2022-07-04 10:30:47 +02:00
Committed by: GitHub
Parent commit: a1dcc1310a
This commit: 11652838e2

16 changed files with 532 additions and 90 deletions

Pipfile

@@ -9,6 +9,7 @@ boto3 = "1.24.8"
 arnparse = "0.0.2"
 botocore = "1.27.8"
 pydantic = "1.9.1"
+pytest = "7.1.2"

 [dev-packages]

Pipfile.lock (generated)

@@ -1,7 +1,7 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "b532ef32ebcb28be5438c1ef9c717aa6792cfd5098ad81a9ed35520a245bb8f2"
+            "sha256": "f675454f65aef11134860cda6cb84cddfecfb17827ce6efbe05ce7ae8efbb926"
         },
         "pipfile-spec": 6,
         "requires": {
@@ -24,21 +24,29 @@
             "index": "pypi",
             "version": "==0.0.2"
         },
+        "attrs": {
+            "hashes": [
+                "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4",
+                "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"
+            ],
+            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
+            "version": "==21.4.0"
+        },
         "boto3": {
             "hashes": [
-                "sha256:490f5e88f5551b33ae3019a37412158b76426d63d1fb910968ade9b6a024e5fe",
-                "sha256:e284705da36faa668c715ae1f74ebbff4320dbfbe3a733df3a8ab076d1ed1226"
+                "sha256:551e902b70ccf9f6a58e28bb409718a0403b021b17ff6d63ab0b9af5a122386e",
+                "sha256:abe5b44010e3f50c5e0243aa4fc2338f10e5a868413faa0d6ae79131d6b507b8"
             ],
             "index": "pypi",
-            "version": "==1.24.14"
+            "version": "==1.24.21"
         },
         "botocore": {
             "hashes": [
-                "sha256:bb56fa77b8fa1ec367c2e16dee62d60000451aac5140dcce3ebddc167fd5c593",
-                "sha256:df1e9b208ff93daac7c645b0b04fb6dccd7f20262eae24d87941727025cbeece"
+                "sha256:3bafa8e773d207c0ce02c63790a8820562e22d2e892abaf1eb90c343e995218a",
+                "sha256:b685ffc0ac170bf7de5fde931504eccd939f8545a9c9d10259245ca4c91101e5"
             ],
             "index": "pypi",
-            "version": "==1.27.14"
+            "version": "==1.27.21"
         },
         "colorama": {
             "hashes": [
@@ -48,6 +56,13 @@
             "index": "pypi",
             "version": "==0.4.5"
         },
+        "iniconfig": {
+            "hashes": [
+                "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3",
+                "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"
+            ],
+            "version": "==1.1.1"
+        },
         "jmespath": {
             "hashes": [
                 "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980",
@@ -56,6 +71,30 @@
             "markers": "python_version >= '3.7'",
             "version": "==1.0.1"
         },
+        "packaging": {
+            "hashes": [
+                "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb",
+                "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"
+            ],
+            "markers": "python_version >= '3.6'",
+            "version": "==21.3"
+        },
+        "pluggy": {
+            "hashes": [
+                "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159",
+                "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"
+            ],
+            "markers": "python_version >= '3.6'",
+            "version": "==1.0.0"
+        },
+        "py": {
+            "hashes": [
+                "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719",
+                "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"
+            ],
+            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
+            "version": "==1.11.0"
+        },
         "pydantic": {
             "hashes": [
                 "sha256:02eefd7087268b711a3ff4db528e9916ac9aa18616da7bca69c1871d0b7a091f",
@@ -97,6 +136,22 @@
             "index": "pypi",
             "version": "==1.9.1"
         },
+        "pyparsing": {
+            "hashes": [
+                "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb",
+                "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"
+            ],
+            "markers": "python_full_version >= '3.6.8'",
+            "version": "==3.0.9"
+        },
+        "pytest": {
+            "hashes": [
+                "sha256:13d0e3ccfc2b6e26be000cb6568c832ba67ba32e719443bfe725814d3c42433c",
+                "sha256:a06a0425453864a270bc45e71f783330a7428defb4230fb5e6a731fde06ecd45"
+            ],
+            "index": "pypi",
+            "version": "==7.1.2"
+        },
         "python-dateutil": {
             "hashes": [
                 "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86",
@@ -121,6 +176,14 @@
             "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
             "version": "==1.16.0"
         },
+        "tomli": {
+            "hashes": [
+                "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc",
+                "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"
+            ],
+            "markers": "python_version >= '3.7'",
+            "version": "==2.0.1"
+        },
         "typing-extensions": {
             "hashes": [
                 "sha256:6657594ee297170d19f67d55c05852a874e7eb634f4f753dbd667855e07c1708",

@@ -1,10 +1,16 @@
from datetime import datetime from datetime import datetime
from os import getcwd
timestamp = datetime.today().strftime("%Y-%m-%d %H:%M:%S") timestamp = datetime.today()
prowler_version = "3.0-alfa" prowler_version = "3.0-alfa"
# Groups # Groups
groups_file = "groups.json" groups_file = "groups.json"
# AWS services-regions matrix json # AWS services-regions matrix json
aws_services_json_file = "providers/aws/aws_regions_by_service.json" aws_services_json_file = "providers/aws/aws_regions_services.json"
default_output_directory = getcwd() + "/output"
csv_file_suffix = timestamp.strftime("%Y%m%d%H%M%S") + ".csv"
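Note the behavioral shift here: timestamp is now a datetime object rather than a pre-formatted string, so each consumer formats it as needed (the banner keeps the old human-readable form, the CSV suffix gets a compact one). A standalone illustration of the two derived formats, with example values:

    from datetime import datetime

    timestamp = datetime.today()  # an object now, no longer a string
    banner_date = timestamp.strftime("%Y-%m-%d %H:%M:%S")           # e.g. 2022-07-04 10:30:47
    csv_file_suffix = timestamp.strftime("%Y%m%d%H%M%S") + ".csv"   # e.g. 20220704103047.csv
    print(banner_date, csv_file_suffix)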


lib/banner.py

@@ -14,6 +14,6 @@ def print_banner():
 | |_) | | | (_) \ V V /| | __/ |
 | .__/|_| \___/ \_/\_/ |_|\___|_|v{prowler_version}
 |_|{Fore.BLUE} the handy cloud security tool
-{Fore.YELLOW} Date: {timestamp}{Style.RESET_ALL}
+{Fore.YELLOW} Date: {timestamp.strftime("%Y-%m-%d %H:%M:%S")}{Style.RESET_ALL}
 """
     print(banner)


lib/check/check.py

@@ -9,7 +9,7 @@ from colorama import Fore, Style
 from config.config import groups_file
 from lib.check.models import Output_From_Options, load_check_metadata
 from lib.logger import logger
-from lib.outputs import report
+from lib.outputs.outputs import get_orgs_info, report
 from lib.utils.utils import open_file, parse_json_file
@@ -175,22 +175,28 @@ def import_check(check_path: str) -> ModuleType:
     return lib


-def set_output_options(quiet):
+def set_output_options(quiet: bool, output_modes: list, input_output_directory: str):
     global output_options
     output_options = Output_From_Options(
-        is_quiet=quiet
+        is_quiet=quiet,
+        output_modes=output_modes,
+        output_directory=input_output_directory
         # set input options here
     )
     return output_options


-def run_check(check):
+def run_check(check, audit_info, output_options):
     print(
         f"\nCheck Name: {check.checkName} - {Fore.MAGENTA}{check.serviceName}{Fore.YELLOW} [{check.severity}]{Style.RESET_ALL}"
     )
     logger.debug(f"Executing check: {check.checkName}")
     findings = check.execute()
+    # Call to get orgs; we still need to check whether the input option was passed
+    # in output options. Right now it is not checked and get_orgs_info is called
+    # straight away to generate the fields to be passed to the CSV.
+    organizations_info = get_orgs_info()
+    report(findings, output_options, audit_info, organizations_info)


 def import_check(check_path: str) -> ModuleType:


lib/check/models.py

@@ -8,21 +8,11 @@ from pydantic import BaseModel, ValidationError
 from lib.logger import logger


-@dataclass
-class Check_Report:
-    status: str
-    region: str
-    result_extended: str
-
-    def __init__(self):
-        self.status = ""
-        self.region = ""
-        self.result_extended = ""
-
-
 @dataclass
 class Output_From_Options:
     is_quiet: bool
+    output_modes: list
+    output_directory: str


 # Testing Pending
@@ -174,6 +164,40 @@ class Check(ABC):
     def compliance(self):
         return self.__Compliance__

+    @property
+    def metadata(self):
+        return self.__check_metadata__
+
     @abstractmethod
     def execute(self):
         pass
+
+
+@dataclass
+class Check_Report:
+    status: str
+    region: str
+    check_metadata: dict
+    status_extended: str
+    resource_id: str
+    resource_details: str
+    resource_tags: str
+    resource_arn: str
+
+    def __init__(self, metadata):
+        self.check_metadata = metadata
+        self.status_extended = ""
+        self.resource_details = ""
+        self.resource_tags = []
+        self.resource_id = ""
+        self.resource_arn = ""
+
+
+@dataclass
+class Organizations_Info:
+    account_details_email: str
+    account_details_name: str
+    account_details_arn: str
+    account_details_org: str
+    account_details_tags: str


lib/outputs.py (deleted)

@@ -1,30 +0,0 @@
-from colorama import Fore, Style
-
-
-def report(check_findings, output_options):
-    check_findings.sort(key=lambda x: x.region)
-    for finding in check_findings:
-        color = set_report_color(finding.status)
-        if output_options.is_quiet and "FAIL" in finding.status:
-            print(
-                f"{color}{finding.status}{Style.RESET_ALL} {finding.region}: {finding.result_extended}"
-            )
-        elif not output_options.is_quiet:
-            print(
-                f"{color}{finding.status}{Style.RESET_ALL} {finding.region}: {finding.result_extended}"
-            )
-
-
-def set_report_color(status):
-    color = ""
-    if status == "PASS":
-        color = Fore.GREEN
-    elif status == "FAIL":
-        color = Fore.RED
-    elif status == "ERROR":
-        color = Fore.BLACK
-    elif status == "WARNING":
-        color = Fore.YELLOW
-    else:
-        raise Exception("Invalid Report Status. Must be PASS, FAIL, ERROR or WARNING")
-    return color

lib/outputs/models.py (new file)

@@ -0,0 +1,189 @@
+from dataclasses import asdict, dataclass
+
+from config.config import timestamp
+from lib.check.models import Check_Report, Organizations_Info
+
+
+@dataclass
+class Compliance_Framework:
+    Framework: str
+    Version: str
+    Group: list
+    Control: list
+
+
+@dataclass
+class Check_Output_CSV:
+    assessment_start_time: str
+    finding_unique_id: str
+    provider: str
+    profile: str
+    account_id: int
+    account_name: str
+    account_email: str
+    account_arn: str
+    account_org: str
+    account_tags: str
+    region: str
+    check_id: str
+    check_name: str
+    check_title: str
+    check_type: str
+    status: str
+    status_extended: str
+    service_name: str
+    subservice_name: str
+    severity: str
+    resource_id: str
+    resource_arn: str
+    resource_type: str
+    resource_details: str
+    resource_tags: list
+    description: dict
+    risk: list
+    related_url: list
+    remediation_recommendation_text: str
+    remediation_recommendation_url: list
+    remediation_recommendation_code_nativeiac: str
+    remediation_recommendation_code_terraform: str
+    remediation_recommendation_code_cli: str
+    remediation_recommendation_code_other: str
+    categories: str
+    depends_on: str
+    related_to: str
+    notes: str
+    compliance: str
+
+    def get_csv_header(self):
+        csv_header = []
+        for key in asdict(self):
+            csv_header.append(key)
+        return csv_header
+
+    def __init__(
+        self,
+        account: str,
+        profile: str,
+        report: Check_Report,
+        organizations: Organizations_Info,
+    ):
+        self.assessment_start_time = timestamp.isoformat()
+        self.finding_unique_id = ""
+        self.provider = report.check_metadata.Provider
+        self.profile = profile
+        self.account_id = account
+        self.account_name = organizations.account_details_name
+        self.account_email = organizations.account_details_email
+        self.account_arn = organizations.account_details_arn
+        self.account_org = organizations.account_details_org
+        self.account_tags = organizations.account_details_tags
+        self.region = report.region
+        self.check_id = report.check_metadata.CheckID
+        self.check_name = report.check_metadata.CheckName
+        self.check_title = report.check_metadata.CheckTitle
+        self.check_type = report.check_metadata.CheckType
+        self.status = report.status
+        self.status_extended = report.status_extended
+        self.service_name = report.check_metadata.ServiceName
+        self.subservice_name = report.check_metadata.SubServiceName
+        self.severity = report.check_metadata.Severity
+        self.resource_id = report.resource_id
+        self.resource_arn = report.resource_arn
+        self.resource_type = report.check_metadata.ResourceType
+        self.resource_details = report.resource_details
+        self.resource_tags = report.resource_tags
+        self.description = report.check_metadata.Description
+        self.risk = report.check_metadata.Risk
+        self.related_url = report.check_metadata.RelatedUrl
+        self.remediation_recommendation_text = report.check_metadata.Remediation[
+            "Recommendation"
+        ]["Text"]
+        self.remediation_recommendation_url = report.check_metadata.Remediation[
+            "Recommendation"
+        ]["Url"]
+        self.remediation_recommendation_code_nativeiac = (
+            report.check_metadata.Remediation["Code"]["NativeIaC"]
+        )
+        self.remediation_recommendation_code_terraform = (
+            report.check_metadata.Remediation["Code"]["Terraform"]
+        )
+        self.remediation_recommendation_code_cli = report.check_metadata.Remediation[
+            "Code"
+        ]["cli"]
+        self.remediation_recommendation_code_other = report.check_metadata.Remediation[
+            "Code"
+        ]["other"]
+        self.categories = self.__unroll_list__(report.check_metadata.Categories)
+        self.depends_on = self.__unroll_list__(report.check_metadata.DependsOn)
+        self.related_to = self.__unroll_list__(report.check_metadata.RelatedTo)
+        self.notes = report.check_metadata.Notes
+        self.compliance = self.__unroll_compliance__(report.check_metadata.Compliance)
+
+    def __unroll_list__(self, listed_items: list):
+        unrolled_items = ""
+        separator = "|"
+        for item in listed_items:
+            if not unrolled_items:
+                unrolled_items = f"{item}"
+            else:
+                unrolled_items = f"{unrolled_items}{separator}{item}"
+        return unrolled_items
+
+    def __unroll_dict__(self, dict_items: dict):
+        unrolled_items = ""
+        separator = "|"
+        for key, value in dict_items.items():
+            unrolled_item = f"{key}:{value}"
+            if not unrolled_items:
+                unrolled_items = f"{unrolled_item}"
+            else:
+                unrolled_items = f"{unrolled_items}{separator}{unrolled_item}"
+        return unrolled_items
+
+    def __unroll_compliance__(self, compliance: list):
+        compliance_frameworks = []
+        # fill list of dataclasses
+        for item in compliance:
+            compliance_framework = Compliance_Framework(
+                Framework=item["Framework"],
+                Version=item["Version"],
+                Group=item["Group"],
+                Control=item["Control"],
+            )
+            compliance_frameworks.append(compliance_framework)
+        # iterate over list of dataclasses to output info
+        unrolled_compliance = ""
+        groups = ""
+        controls = ""
+        item_separator = ","
+        framework_separator = "|"
+        generic_separator = "/"
+        for framework in compliance_frameworks:
+            for group in framework.Group:
+                if groups:
+                    groups = f"{groups}{generic_separator}"
+                groups = f"{groups}{group}"
+            for control in framework.Control:
+                if controls:
+                    controls = f"{controls}{generic_separator}"
+                controls = f"{controls}{control}"
+            if unrolled_compliance:
+                unrolled_compliance = f"{unrolled_compliance}{framework_separator}"
+            unrolled_compliance = f"{unrolled_compliance}{framework.Framework}{item_separator}{framework.Version}{item_separator}{groups}{item_separator}{controls}"
+            # unset groups and controls for next framework
+            controls = ""
+            groups = ""
+        return unrolled_compliance
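For reference, the unroll helpers above flatten nested metadata into single CSV cells using three separators: "," between a framework's fields, "/" between groups or controls, and "|" between frameworks. A standalone sketch of the same scheme (the sample frameworks below are made up for illustration):

    compliance = [
        {"Framework": "CIS", "Version": "1.4", "Group": ["level1", "level2"], "Control": ["2.2.1"]},
        {"Framework": "PCI-DSS", "Version": "1.4", "Group": ["level1"], "Control": ["4.4"]},
    ]
    # Same separator scheme as __unroll_compliance__: "," / "/" / "|"
    unrolled = "|".join(
        f"{item['Framework']},{item['Version']},"
        f"{'/'.join(item['Group'])},{'/'.join(item['Control'])}"
        for item in compliance
    )
    print(unrolled)  # CIS,1.4,level1/level2,2.2.1|PCI-DSS,1.4,level1,4.4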

lib/outputs/outputs.py (new file)

@@ -0,0 +1,119 @@
+from csv import DictWriter
+
+from colorama import Fore, Style
+
+from config.config import csv_file_suffix
+from lib.check.models import Organizations_Info
+from lib.outputs.models import Check_Output_CSV
+from lib.utils.utils import file_exists, open_file
+
+
+def report(check_findings, output_options, audit_info, organizations_info):
+    check_findings.sort(key=lambda x: x.region)
+
+    csv_fields = []
+    # check output options
+    file_descriptors = {}
+    if output_options.output_modes:
+        if "csv" in output_options.output_modes:
+            csv_fields = generate_csv_fields()
+        file_descriptors = fill_file_descriptors(
+            output_options.output_modes,
+            audit_info.audited_account,
+            output_options.output_directory,
+            csv_fields,
+        )
+
+    for finding in check_findings:
+        # printing the finding ...
+        color = set_report_color(finding.status)
+        if output_options.is_quiet and "FAIL" in finding.status:
+            print(
+                f"{color}{finding.status}{Style.RESET_ALL} {finding.region}: {finding.status_extended}"
+            )
+        elif not output_options.is_quiet:
+            print(
+                f"{color}{finding.status}{Style.RESET_ALL} {finding.region}: {finding.status_extended}"
+            )
+
+        if file_descriptors:
+            # sending the finding to input options
+            if "csv" in file_descriptors:
+                finding_output = Check_Output_CSV(
+                    audit_info.audited_account,
+                    audit_info.profile,
+                    finding,
+                    organizations_info,
+                )
+                csv_writer = DictWriter(
+                    file_descriptors["csv"], fieldnames=csv_fields, delimiter=";"
+                )
+                csv_writer.writerow(finding_output.__dict__)
+
+    if file_descriptors:
+        # Close all file descriptors
+        for file_descriptor in file_descriptors:
+            file_descriptors.get(file_descriptor).close()
+
+
+def fill_file_descriptors(output_modes, audited_account, output_directory, csv_fields):
+    file_descriptors = {}
+    for output_mode in output_modes:
+        if output_mode == "csv":
+            filename = (
+                f"{output_directory}/prowler-output-{audited_account}-{csv_file_suffix}"
+            )
+            if file_exists(filename):
+                file_descriptor = open_file(
+                    filename,
+                    "a",
+                )
+            else:
+                file_descriptor = open_file(
+                    filename,
+                    "a",
+                )
+                csv_header = [x.upper() for x in csv_fields]
+                csv_writer = DictWriter(
+                    file_descriptor, fieldnames=csv_header, delimiter=";"
+                )
+                csv_writer.writeheader()
+            file_descriptors.update({output_mode: file_descriptor})
+    return file_descriptors
+
+
+def set_report_color(status):
+    color = ""
+    if status == "PASS":
+        color = Fore.GREEN
+    elif status == "FAIL":
+        color = Fore.RED
+    elif status == "ERROR":
+        color = Fore.BLACK
+    elif status == "WARNING":
+        color = Fore.YELLOW
+    else:
+        raise Exception("Invalid Report Status. Must be PASS, FAIL, ERROR or WARNING")
+    return color
+
+
+def generate_csv_fields():
+    csv_fields = []
+    for field in Check_Output_CSV.__dict__["__annotations__"].keys():
+        csv_fields.append(field)
+    return csv_fields
+
+
+def get_orgs_info():
+    organizations_info = Organizations_Info(
+        account_details_email="",
+        account_details_name="",
+        account_details_arn="",
+        account_details_org="",
+        account_details_tags="",
+    )
+    return organizations_info
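Note that generate_csv_fields() derives the CSV columns from the dataclass annotations, so column order follows the field declaration order in Check_Output_CSV and adding a column only requires adding a field. A minimal sketch of that mechanism (Example_Output is a made-up stand-in, not part of the PR):

    from dataclasses import dataclass

    @dataclass
    class Example_Output:
        status: str
        region: str
        status_extended: str

    # Same lookup generate_csv_fields() performs on Check_Output_CSV
    print(list(Example_Output.__dict__["__annotations__"].keys()))
    # ['status', 'region', 'status_extended']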


lib/utils/utils.py

@@ -1,15 +1,15 @@
 import json
 import sys
 from io import TextIOWrapper
+from os.path import exists
 from typing import Any

 from lib.logger import logger


-def open_file(input_file: str) -> TextIOWrapper:
+def open_file(input_file: str, mode: str = "r") -> TextIOWrapper:
     try:
-        # First recover the available groups in groups.json
-        f = open(input_file)
+        f = open(input_file, mode)
     except Exception as e:
         logger.critical(f"{input_file}: {e.__class__.__name__}")
         sys.exit()
@@ -26,3 +26,17 @@ def parse_json_file(input_file: TextIOWrapper) -> Any:
         sys.exit()
     else:
         return json_file
+
+
+# check whether a file exists
+def file_exists(filename: str):
+    try:
+        exists_filename = exists(filename)
+    except Exception as e:
+        logger.critical(f"{filename}: {e.__class__.__name__}")
+        quit()
+    else:
+        return exists_filename


@@ -153,11 +153,15 @@ def provider_set_session(
         logger.info("Audit session is the original one")
         current_audit_info.audit_session = current_audit_info.original_session

     # Setting default region of session
     if current_audit_info.audit_session.region_name:
         current_audit_info.profile_region = current_audit_info.audit_session.region_name
     else:
         current_audit_info.profile_region = "us-east-1"

+    return current_audit_info
+

 def validate_credentials(validate_session):


@@ -19,6 +19,17 @@
                 "level2"
             ],
             "Version": "1.4"
+        },
+        {
+            "Control": [
+                "4.4"
+            ],
+            "Framework": "PCI-DSS",
+            "Group": [
+                "level1",
+                "level2"
+            ],
+            "Version": "1.4"
         }
     ],
     "DependsOn": [


@@ -10,24 +10,24 @@ class ec2_ebs_snapshots_encrypted(Check):
             if hasattr(regional_client, "snapshots"):
                 if regional_client.snapshots:
                     for snapshot in regional_client.snapshots:
+                        report = Check_Report(self.metadata)
+                        report.region = region
                         if snapshot["Encrypted"]:
-                            report = Check_Report()
                             report.status = "PASS"
-                            report.result_extended = (
+                            report.status_extended = (
                                 f"EBS Snapshot {snapshot['SnapshotId']} is encrypted"
                             )
-                            report.region = region
+                            report.resource_id = snapshot["SnapshotId"]
                         else:
-                            report = Check_Report()
                             report.status = "FAIL"
-                            report.result_extended = (
+                            report.status_extended = (
                                 f"EBS Snapshot {snapshot['SnapshotId']} is unencrypted"
                             )
-                            report.region = region
+                            report.resource_id = snapshot["SnapshotId"]
             else:
-                report = Check_Report()
+                report = Check_Report(self.metadata)
                 report.status = "PASS"
-                report.result_extended = "There are no EC2 EBS snapshots"
+                report.status_extended = "There are no EC2 EBS snapshots"
                 report.region = region
             findings.append(report)


@@ -13,7 +13,10 @@ class iam_disable_30_days_credentials(Check):
         if response:
             for user in response:
-                report = Check_Report()
+                report = Check_Report(self.metadata)
+                report.resource_id = user["UserName"]
+                report.resource_arn = user["Arn"]
+                report.region = "us-east-1"
                 if "PasswordLastUsed" in user and user["PasswordLastUsed"] != "":
                     try:
                         time_since_insertion = (
@@ -22,23 +25,21 @@ class iam_disable_30_days_credentials(Check):
                         )
                         if time_since_insertion.days > maximum_expiration_days:
                             report.status = "FAIL"
-                            report.result_extended = f"User {user['UserName']} has not logged into the console in the past 30 days"
-                            report.region = iam_client.region
+                            report.status_extended = f"User {user['UserName']} has not logged into the console in the past 30 days"
                         else:
                             report.status = "PASS"
-                            report.result_extended = f"User {user['UserName']} has logged into the console in the past 30 days"
-                            report.region = iam_client.region
+                            report.status_extended = f"User {user['UserName']} has logged into the console in the past 30 days"
                     except KeyError:
                         pass
                 else:
                     report.status = "PASS"
-                    report.result_extended = f"User {user['UserName']} has not a console password or is unused."
-                    report.region = iam_client.region
+                    report.status_extended = f"User {user['UserName']} has not a console password or is unused."
                 # Append report
                 findings.append(report)
         else:
-            report = Check_Report()
+            report = Check_Report(self.metadata)
             report.status = "PASS"
             report.result_extended = "There is no IAM users"
             report.region = iam_client.region


@@ -13,7 +13,10 @@ class iam_disable_90_days_credentials(Check):
         if response:
             for user in response:
-                report = Check_Report()
+                report = Check_Report(self.metadata)
+                report.region = "us-east-1"
+                report.resource_id = user["UserName"]
+                report.resource_arn = user["Arn"]
                 if "PasswordLastUsed" in user and user["PasswordLastUsed"] != "":
                     try:
                         time_since_insertion = (
@@ -22,25 +25,23 @@ class iam_disable_90_days_credentials(Check):
                         )
                         if time_since_insertion.days > maximum_expiration_days:
                             report.status = "FAIL"
-                            report.result_extended = f"User {user['UserName']} has not logged into the console in the past 90 days"
-                            report.region = iam_client.region
+                            report.status_extended = f"User {user['UserName']} has not logged into the console in the past 90 days"
                         else:
                             report.status = "PASS"
-                            report.result_extended = f"User {user['UserName']} has logged into the console in the past 90 days"
-                            report.region = iam_client.region
+                            report.status_extended = f"User {user['UserName']} has logged into the console in the past 90 days"
                     except KeyError:
                         pass
                 else:
                     report.status = "PASS"
-                    report.result_extended = f"User {user['UserName']} has not a console password or is unused."
-                    report.region = iam_client.region
+                    report.status_extended = f"User {user['UserName']} has not a console password or is unused."
                 # Append report
                 findings.append(report)
         else:
-            report = Check_Report()
+            report = Check_Report(self.metadata)
             report.status = "PASS"
-            report.result_extended = "There is no IAM users"
-            report.region = iam_client.region
+            report.status_extended = "There is no IAM users"
+            report.region = "us-east-1"

     return findings

prowler

@@ -3,7 +3,10 @@
 import argparse
 import sys
+from os import mkdir
+from os.path import isdir

+from config.config import default_output_directory
 from lib.banner import print_banner, print_version
 from lib.check.check import (
     bulk_load_checks_metadata,
@@ -38,7 +41,7 @@ if __name__ == "__main__":
         "--severity",
         nargs="+",
         help="List of severities [informational, low, medium, high, critical]",
-        choices=["informational","low","medium","high","critical"]
+        choices=["informational", "low", "medium", "high", "critical"],
     )

     # Exclude checks options
     parser.add_argument("-e", "--excluded-checks", nargs="+", help="Checks to exclude")
@@ -116,6 +119,21 @@ if __name__ == "__main__":
         nargs="+",
         help="AWS region names to run Prowler against",
     )
+    parser.add_argument(
+        "-M",
+        "--output-modes",
+        nargs="+",
+        help="Output mode, by default csv",
+        choices=["csv"],
+    )
+    parser.add_argument(
+        "-o",
+        "--custom-output-directory",
+        nargs="?",
+        help="Custom output directory, by default the folder where Prowler is stored",
+        default=default_output_directory,
+    )

     # Parse Arguments
     args = parser.parse_args()
@@ -127,7 +145,9 @@ if __name__ == "__main__":
     services = args.services
     groups = args.groups
     checks_file = args.checks_file
+    output_directory = args.custom_output_directory
     severities = args.severity
+    output_modes = args.output_modes

     # Set Logger configuration
     set_logging_config(args.log_file, args.log_level)
@@ -141,6 +161,17 @@ if __name__ == "__main__":
         logger.critical("To use -I/-T options -R option is needed")
         sys.exit()

+    # Check the output directory: if it is the default and missing, create it;
+    # if it is custom and missing, error out.
+    if output_directory:
+        if not isdir(output_directory):
+            if output_directory == default_output_directory:
+                if output_modes:
+                    mkdir(default_output_directory)
+            else:
+                logger.critical("Output directory does not exist")
+                sys.exit()
+
     if args.version:
         print_version()
         sys.exit()
@@ -192,10 +223,12 @@ if __name__ == "__main__":
         sys.exit()

     # Setting output options
-    set_output_options(args.quiet)
+    audit_output_options = set_output_options(
+        args.quiet, output_modes, output_directory
+    )

     # Set global session
-    provider_set_session(
+    audit_info = provider_set_session(
         args.profile,
         args.role,
         args.session_duration,
@@ -218,7 +251,7 @@ if __name__ == "__main__":
         check_to_execute = getattr(lib, check_name)
         c = check_to_execute()
         # Run check
-        run_check(c)
+        run_check(c, audit_info, audit_output_options)
     # If check does not exists in the provider or is from another provider
     except ModuleNotFoundError:
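Taken together, -M selects the output modes and -o overrides the default output directory (getcwd() + "/output" per config/config.py), so a run exercising the new CSV path would look something like ./prowler -M csv -o /tmp/prowler-output (the path here is illustrative). Without -M, no file descriptors are opened and findings are only printed to stdout, matching the "Only send to csv when -M is passed" fix in the commit message.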