diff --git a/DocLinkValidity.py b/DocLinkValidity.py index ffe12e1..ee5ac83 100644 --- a/DocLinkValidity.py +++ b/DocLinkValidity.py @@ -38,7 +38,7 @@ print('.', end="", flush=True) conn = Request(url) conn.add_header('User-Agent', 'aws-cli') - resp = urlopen(conn) + resp = urlopen(conn, timeout=10) if resp.getcode() != 200: if e.code not in invalidRefDict: diff --git a/Screener.py b/Screener.py index 882726b..aad0736 100644 --- a/Screener.py +++ b/Screener.py @@ -9,6 +9,7 @@ from services.Reporter import Reporter from services.PageBuilder import PageBuilder from services.dashboard.DashboardPageBuilder import DashboardPageBuilder +from utils.CustomPage.CustomPage import CustomPage from utils.Tools import _warn, _info from frameworks.FrameworkPageBuilder import FrameworkPageBuilder @@ -44,7 +45,11 @@ def scanByService(service, regions, filters): scannedKey = 'scanned_'+service[0] globalKey = 'GLOBALRESOURCES_'+service[0] Config.set(scannedKey, _zeroCount) - + + ## CustomPage Enhancement + cp = CustomPage() + cp.resetOutput(service[0]) + for region in _regions: reg = region if region == 'GLOBAL': @@ -103,6 +108,9 @@ def scanByService(service, regions, filters): with open(_C.FORK_DIR + '/' + service[0] + '.stat.json', 'w') as f: json.dump(scanned, f) + + cp.writeOutput(service[0].lower()) + @staticmethod def getServiceModuleDynamically(service): diff --git a/frameworks/CIS/map.json b/frameworks/CIS/map.json index d2d766e..cfe52e8 100644 --- a/frameworks/CIS/map.json +++ b/frameworks/CIS/map.json @@ -13,25 +13,25 @@ "2": ["cloudtrail.RequiresKmsKey"], "4": ["cloudtrail.LogFileValidationEnabled"], "5": ["cloudtrail.CloudWatchLogsLogGroupArn"], - "6": [], + "6": ["cloudtrail.EnableS3PublicAccessBlock"], "7": ["cloudtrail.EnableTrailS3BucketLogging"] }, "CloudWatch.": { - "1": [], - "4": [], - "5": [], - "6": [], - "7": [], - "8": [], - "9": [], - "10": [], - "11": [], - "12": [], - "13": [], - "14": [] + "1": ["cloudtrail.NeedToEnableCloudTrail", 
"cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMAroot1"], + "4": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMAalarm4"], + "5": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMATrail5"], + "6": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMAAuthFail6"], + "7": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMACMK7"], + "8": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMAS3Policy8"], + "9": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMAConfig9"], + "10": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMASecGroup10"], + "11": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMANACL11"], + "12": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMAGateway12"], + "13": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMARouteTable13"], + "14": ["cloudtrail.NeedToEnableCloudTrail", "cloudwatch.trailWithoutCWLogs", "cloudwatch.trailWithCWLogsWithoutMetrics", "cloudwatch.trailWOMAVPC14"] }, "Config.": { - "1": [] + "1": ["iam.EnableConfigService", "iam.PartialEnableConfigService"] }, "EC2.": { "2": ["ec2.SGDefaultDisallowTraffic"], @@ -48,7 +48,7 @@ "15": 
["iam.passwordPolicyLength"], "16": ["iam.passwordPolicyReuse"], "18": [], - "22": ["iam.consoleLastAccess45,iam.consoleLastAccess90, iam.consoleLastAccess365"] + "22": ["iam.consoleLastAccess45", "iam.consoleLastAccess90", "iam.consoleLastAccess365"] }, "KMS.": { "4": ["kms.KeyRotationEnabled"] diff --git a/frameworks/FrameworkPageBuilder.py b/frameworks/FrameworkPageBuilder.py index 73361ed..2f0b5d5 100644 --- a/frameworks/FrameworkPageBuilder.py +++ b/frameworks/FrameworkPageBuilder.py @@ -5,6 +5,7 @@ from frameworks.SSB.SSB import SSB from frameworks.WAFS.WAFS import WAFS from frameworks.MSR.MSR import MSR +from frameworks.CIS.CIS import CIS class FrameworkPageBuilder(PageBuilder): COMPLIANCE_STATUS = ["Not available", "Compliant", "Need Attention"] @@ -28,6 +29,13 @@ class FrameworkPageBuilder(PageBuilder): HTML_TABLE_ID = 'screener-framework' + colorCustomHex = ["#17a2b8", "#28a745", "#dc3545"] + colorCustomRGB = [ + [23, 40, 220], + [162, 167, 53], + [184, 69, 69] + ] + def __init__(self, service=None, reporter=None): framework = service super().__init__(framework, reporter) diff --git a/info.json b/info.json index d582e53..be58df8 100644 --- a/info.json +++ b/info.json @@ -1 +1 @@ -{"cloudfront": 8, "cloudtrail": 17, "dynamodb": 24, "ec2": 49, "efs": 3, "eks": 7, "elasticache": 10, "guardduty": 4, "iam": 32, "kms": 4, "lambda": 14, "opensearch": 18, "rds": 77, "s3": 12} \ No newline at end of file +{"cloudfront": 8, "cloudtrail": 18, "cloudwatch": 18, "dynamodb": 24, "ec2": 49, "efs": 3, "eks": 7, "elasticache": 10, "guardduty": 4, "iam": 37, "kms": 4, "lambda": 15, "opensearch": 18, "rds": 82, "s3": 12} \ No newline at end of file diff --git a/main.py b/main.py index 40e6f21..e793f9e 100644 --- a/main.py +++ b/main.py @@ -135,6 +135,10 @@ def number_format(num, places=2): ## Can pass in True for RegionSelector to skip prompt regions = AwsRegionSelector.get_all_enabled_regions(flagSkipPromptForRegionConfirmation) + + if acctLoop == 1: + 
Config.set('REGIONS_SELECTED', regions) + frameworks = [] if len(_cli_options['frameworks']) > 0: frameworks = _cli_options['frameworks'].split(',') @@ -209,7 +213,7 @@ def number_format(num, places=2): hasGlobal = False for file in os.listdir(_C.FORK_DIR): - if file[0] == '.' or file == _C.SESSUID_FILENAME or file == 'tail.txt' or file == 'error.txt' or file == 'empty.txt' or file == 'all.csv': + if file[0] == '.' or file == _C.SESSUID_FILENAME or file == 'tail.txt' or file == 'error.txt' or file == 'empty.txt' or file == 'all.csv' or file[0:10] == 'CustomPage': continue f = file.split('.') if len(f) == 2: diff --git a/services/Evaluator.py b/services/Evaluator.py index 3a8f12a..c47faa3 100644 --- a/services/Evaluator.py +++ b/services/Evaluator.py @@ -3,6 +3,7 @@ from utils.Config import Config from utils.Tools import _warn, _info +from utils.CustomPage.CustomPage import CustomPage import constants as _C class Evaluator(): @@ -114,4 +115,9 @@ def __del__(self): return scanned.append(';'.join([Config.get(classPrefix), driver, name, hasError])) - Config.set(ConfigKey, scanned) \ No newline at end of file + Config.set(ConfigKey, scanned) + + + ## Handle custom page requirement + cp = CustomPage() + cp.trackInfo(driver, name, self.results) \ No newline at end of file diff --git a/services/PageBuilder.py b/services/PageBuilder.py index 78e84a3..42f3d0f 100644 --- a/services/PageBuilder.py +++ b/services/PageBuilder.py @@ -9,18 +9,21 @@ class PageBuilder: serviceIcon = { + 'cloudfront': 'wifi', + 'cloudtrail': 'user-secret', + 'cloudwatch': 'clock', + 'dynamodb': 'bars', 'ec2': 'server', - 'rds': 'database', - 's3': 'hdd', - 'iam': 'users', - 'guardduty': 'shield-alt', - 'opensearch': 'warehouse', 'efs': 'network-wired', 'eks': 'box', - 'cloudfront': 'wifi', - 'elasticache': 'store', + 'elasticache': 'store', + 'guardduty': 'shield-alt', + 'iam': 'users', + 'kms': 'key', 'lambda': 'calculator', - 'cloudtrail': 'user-secret' + 'opensearch': 'warehouse', + 'rds': 
'database', + 's3': 'hdd' } frameworkIcon = 'tasks' @@ -36,6 +39,9 @@ class PageBuilder: } isHome = False + + colorCustomHex = None + colorCustomRGB = None def __init__(self, service, reporter): self.service = service @@ -316,9 +322,14 @@ def _enrichChartData(self, datasets): return arr def _randomRGB(self, idx): - r1Arr = [226, 168, 109, 80 , 51 , 60 , 70 , 89 , 108] - r2Arr = [124, 100, 75 , 63 , 51 , 78 , 105, 158, 212] - r3Arr = [124, 100, 75 , 63 , 51 , 75 , 100, 148, 197] + if self.colorCustomRGB == None: + r1Arr = [226, 168, 109, 80 , 51 , 60 , 70 , 89 , 108] + r2Arr = [124, 100, 75 , 63 , 51 , 78 , 105, 158, 212] + r3Arr = [124, 100, 75 , 63 , 51 , 75 , 100, 148, 197] + else: + r1Arr = self.colorCustomRGB[0] + r2Arr = self.colorCustomRGB[1] + r3Arr = self.colorCustomRGB[2] if idx >= len(r1Arr): idx = idx%len(r1Arr) @@ -330,7 +341,11 @@ def _randomRGB(self, idx): return "rgba({}, {}, {}, 1)".format(r1, r2, r3) def _randomHexColorCode(self, idx): - color = ["#e27c7c", "#a86464", "#6d4b4b", "#503f3f", "#333333", "#3c4e4b", "#466964", "#599e94", "#6cd4c5"] + if self.colorCustomHex == None: + color = ["#e27c7c", "#a86464", "#6d4b4b", "#503f3f", "#333333", "#3c4e4b", "#466964", "#599e94", "#6cd4c5"] + else: + color = self.colorCustomHex + if idx >= len(color): idx = idx%len(color) # return '#' + str(hex(random.randint(0, 0xFFFFFF))).lstrip('0x').rjust(6, '0') @@ -469,12 +484,12 @@ def buildNav(self): sidebarPRE = sidebarPRE.replace('{$ISHOME}', ISHOME) output.append(sidebarPRE) - arr = self.buildNavCustomItems('Services', self.services) - output.append("\n".join(arr)) - arr = self.buildNavCustomItems('Frameworks', self.frameworks) output.append("\n".join(arr)) + arr = self.buildNavCustomItems('Services', self.services) + output.append("\n".join(arr)) + sidebarPOST = open(self._getTemplateByKey('sidebar.postcustom'), 'r').read() output.append(sidebarPOST) @@ -486,17 +501,21 @@ def buildNavCustomItems(self, title, lists): services = lists activeService = 
self.service - output = [] - output.append("".format(title)) - if title == 'Frameworks': + title = 'Compliances / Frameworks' services = {} for l in lists: services[l] = 0 else: services = lists + + output = [] + output.append("".format(title)) + + _services = sorted(services) - for name, count in services.items(): + for name in _services: + count = services[name] if name == activeService: class_ = 'active' else: diff --git a/services/cloudtrail/cloudtrail.reporter.json b/services/cloudtrail/cloudtrail.reporter.json index 75e68e7..44b5dbe 100644 --- a/services/cloudtrail/cloudtrail.reporter.json +++ b/services/cloudtrail/cloudtrail.reporter.json @@ -123,6 +123,19 @@ "[Resilience in CloudTrail]" ] }, + "EnableS3PublicAccessBlock": { + "category": "S", + "^description": "You have disabled public access block on {$COUNT} S3 buckets.This leaves your data to be accessible by anyone. Unless absolutely necessary, enable public access block. Block Public Access at the S3 bucket level provides controls to ensure that objects never have public access. Public access is granted to buckets and objects through access control lists (ACLs), bucket policies, or both.", + "shortDesc": "Block S3 Public Access", + "criticality": "H", + "downtime": 0, + "slowness": 0, + "additionalCost": 0, + "needFullTest": 1, + "ref": [ + "[Block S3 Public Access]" + ] + }, "HasInsightSelectors": { "category": "O", "^description": "CloudTrail Insights analyzes your normal patterns of API call volume and API error rates, also called the baseline, and generates Insights events when the call volume or error rates are outside normal patterns. 
Insights events on API call volume are generated for write management APIs, and Insights events on API error rate are generated for both read and write management APIs.", diff --git a/services/cloudtrail/drivers/CloudtrailCommon.py b/services/cloudtrail/drivers/CloudtrailCommon.py index dc74145..15fdba8 100644 --- a/services/cloudtrail/drivers/CloudtrailCommon.py +++ b/services/cloudtrail/drivers/CloudtrailCommon.py @@ -94,7 +94,21 @@ def _checkS3BucketSettings(self): ## For safety purpose, though all trails must have bucket if 'S3BucketName' in self.trailInfo and len(self.trailInfo['S3BucketName']) > 0: s3Bucket = self.trailInfo['S3BucketName'] - # print("Bucket Name {}".format(s3Bucket)) + print(s3Bucket) + # help me retrieve s3 bucket public + try: + resp = self.s3Client.get_public_access_block( + Bucket=s3Bucket + ) + + for param, val in resp['PublicAccessBlockConfiguration'].items(): + if val == False: + self.results['EnableS3PublicAccessBlock'] = [-1, None] + break + + except botocore.exceptions.ClientError as e: + print('-- Unable to capture Public Access Block settings:', e.response['Error']['Code']) + try: r = self.s3Client.get_bucket_versioning( Bucket=s3Bucket @@ -109,7 +123,7 @@ def _checkS3BucketSettings(self): self.results['EnableTrailS3BucketVersioning'] = [-1, ''] except botocore.exceptions.ClientError as e: - print('Unable to capture S3 MFA settings:', e.response['Error']['Code']) + print('-- Unable to capture S3 MFA settings:', e.response['Error']['Code']) try: r = self.s3Client.get_bucket_logging( @@ -119,7 +133,7 @@ def _checkS3BucketSettings(self): if logEnable == None or not type(logEnable) is dict: self.results['EnableTrailS3BucketLogging'] = [-1, ''] except botocore.exceptions.ClientError as e: - print('Unable to capture S3 Logging settings:', e.response['Error']['Code']) + print('-- Unable to capture S3 Logging settings:', e.response['Error']['Code']) try: resp = self.s3Client.get_bucket_lifecycle( diff --git 
a/services/cloudwatch/Cloudwatch.py b/services/cloudwatch/Cloudwatch.py new file mode 100644 index 0000000..4b414db --- /dev/null +++ b/services/cloudwatch/Cloudwatch.py @@ -0,0 +1,107 @@ +import boto3 +import botocore +import requests + +from utils.Config import Config +from services.Service import Service + +###### TO DO ##### +## Import required service module below +## Example +from services.cloudwatch.drivers.CloudwatchCommon import CloudwatchCommon +from services.cloudwatch.drivers.CloudwatchTrails import CloudwatchTrails + + +###### TO DO ##### +## Replace ServiceName with +## getResources and advise method is default method that must have +## Feel free to develop method to support your checks +class Cloudwatch(Service): + def __init__(self, region): + super().__init__(region) + ssBoto = self.ssBoto + + self.cwClient = ssBoto.client('cloudwatch', config=self.bConfig) + self.cwLogClient = ssBoto.client('logs', config=self.bConfig) + self.ctClient = ssBoto.client('cloudtrail', config=self.bConfig) + + self.ctLogs = [] + self.logGroups = [] + + return + + ## method to get resources for the services + ## return the array of the resources + def loopTrail(self, NextToken=None): + args = {} + if NextToken: + args['NextToken'] = NextToken + + resp = self.ctClient.list_trails(**args) + trails = resp.get('Trails') + for trail in trails: + if trail['HomeRegion'] == self.region: + info = self.ctClient.describe_trails(trailNameList=[trail['TrailARN']]) + tl = info.get('trailList')[0] + if 'CloudWatchLogsLogGroupArn' in tl: + logGroupName = tl['CloudWatchLogsLogGroupArn'].split(':')[6] + self.ctLogs.append([trail['TrailARN'], tl['CloudWatchLogsLogGroupArn'], logGroupName]) + else: + self.ctLogs.append([trail['TrailARN'], None, None]) + + if resp.get('NextToken'): + self.loopTrail(resp.get('NextToken')) + + def getAllLogs(self, nextToken=None): + args = {} + if nextToken: + args['nextToken'] = nextToken + + resp = self.cwLogClient.describe_log_groups(**args) + logGroups = 
resp.get('logGroups') + for lg in logGroups: + self.logGroups.append({ + 'logGroupName': lg['logGroupName'], + 'storedBytes': lg['storedBytes'], + 'retentionInDays': lg['retentionInDays'] if 'retentionInDays' in lg else -1, + 'dataProtectionStatus': lg['dataProtectionStatus'] if 'dataProtectionStatus' in lg else '' + }) + + if resp.get('nextToken'): + self.getAllLogs(resp.get('nextToken')) + + def advise(self): + objs = {} + + self.loopTrail() + for log in self.ctLogs: + print("... (Cloudwatch Logs) inspecting CloudTrail's related LogGroup [{}]".format(log[0])) + obj = CloudwatchTrails(log, log[2], self.cwLogClient) + obj.run(self.__class__) + + objs[f"ctLog::{log[0]}"] = obj.getInfo() + del obj + + self.getAllLogs() + for log in self.logGroups: + print("... (Cloudwatch Logs inspecting LogGroup [{}]".format(log['logGroupName'])) + obj = CloudwatchCommon(log, self.cwLogClient) + obj.run(self.__class__) + + objs[f"Log::{log['logGroupName']}"] = obj.getInfo() + del obj + ###### TO DO ##### + ## call getResources method + ## loop through the resources and run the checks in drivers + ## Example + # instances = self.getResources() + # for instance in instances: + # instanceData = instance['Instances'][0] + # print('... (EC2) inspecting ' + instanceData['InstanceId']) + # obj = Ec2Instance(instanceData,self.ec2Client, self.cwClient) + # obj.run(self.__class__) + + # objs[f"EC2::{instanceData['InstanceId']}"] = obj.getInfo() + #. del obj + + return objs \ No newline at end of file diff --git a/services/cloudwatch/cloudwatch.reporter.json b/services/cloudwatch/cloudwatch.reporter.json new file mode 100644 index 0000000..0a2ccd5 --- /dev/null +++ b/services/cloudwatch/cloudwatch.reporter.json @@ -0,0 +1,236 @@ +{ + "trailWOMAroot1": { + "category": "O", + "^description": "No alarm found for usage of 'root' user. The root user has unrestricted access to all services and resources in an AWS account. We highly recommend that you avoid using the root user for daily tasks. 
Minimizing the use of the root user and adopting the principle of least privilege for access management reduce the risk of accidental changes and unintended disclosure of highly privileged credentials.", + "shortDesc": "Create alarm: 'root' user usage", + "criticality": "H", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 1]" + ] + }, + "trailWOMAunauthAPI2": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm unauthorized API calls. Monitoring unauthorized API calls helps reveal application errors and might reduce time to detect malicious activity.", + "shortDesc": "Create alarm: unauthorized API calls", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 2]" + ] + }, + "trailWOMAnoMFA3": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm console logins that aren't protected by MFA. Monitoring for single-factor console logins increases visibility into accounts that aren't protected by MFA.", + "shortDesc": "Create alarm: Console Sign-in without MFA", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 3]" + ] + }, + "trailWOMAalarm4": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for changes made to IAM policies. Monitoring these changes helps ensure that authentication and authorization controls remain intact.", + "shortDesc": "Create alarm: IAM policy changes", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 4]" + ] + }, + "trailWOMATrail5": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for changes to CloudTrail configuration settings. 
Monitoring these changes helps ensure sustained visibility to activities in the account.", + "shortDesc": "Create alarm: CloudTrail changes", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 5]" + ] + }, + "trailWOMAAuthFail6": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for failed console authentication attempts. Monitoring failed console logins might decrease lead time to detect an attempt to brute-force a credential, which might provide an indicator, such as source IP, that you can use in other event correlations.", + "shortDesc": "Create alarm: Auth Fail", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 6]" + ] + }, + "trailWOMACMK7": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for customer managed keys that have changed state to disabled or scheduled deletion. Data encrypted with disabled or deleted keys is no longer accessible.", + "shortDesc": "Create alarm: CMK changes", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 7]" + ] + }, + "trailWOMAS3Policy8": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for changes to S3 bucket policies. Monitoring these changes might reduce time to detect and correct permissive policies on sensitive S3 buckets.", + "shortDesc": "Create alarm: S3 Bucket Policy changes", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 8]" + ] + }, + "trailWOMAConfig9": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for changes to AWS Config configuration settings. 
Monitoring these changes helps ensure sustained visibility of configuration items in the account.", + "shortDesc": "Create alarm: ConfigService changes", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 9]" + ] + }, + "trailWOMASecGroup10": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for changes to security groups. Monitoring these changes helps ensure that resources and services aren't unintentionally exposed.", + "shortDesc": "Create alarm: SecurityGroups changes", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 10]" + ] + }, + "trailWOMANACL11": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for changes to NACLs. Monitoring these changes helps ensure that AWS resources and services aren't unintentionally exposed.", + "shortDesc": "Create alarm: NACL changes", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 11]" + ] + }, + "trailWOMAGateway12": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for changes to network gateways. Monitoring these changes helps ensure that all ingress and egress traffic traverses the VPC border via a controlled path.", + "shortDesc": "Create alarm: Network Gateway changes", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 12]" + ] + }, + "trailWOMASecGroup13": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for changes to route tables. 
Monitoring these changes helps ensure that all VPC traffic flows through an expected path.", + "shortDesc": "Create alarm: Route Table changes", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 13]" + ] + }, + "trailWOMAGateway14": { + "category": "O", + "^description": "CIS recommends that you create a metric filter and alarm for changes to VPCs. Monitoring these changes helps ensure that authentication and authorization controls remain intact.", + "shortDesc": "Create alarm: VPC Changes", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 14]" + ] + }, + "trailWithoutCWLogs": { + "category": "O", + "^description": "CIS recommends that all CloudTrail to have logs store in CloudWatch so relevant CloudWatch log metrics control can be implement to heighten both AWS Security Pillar and Operation Excellence Pillar", + "shortDesc": "CloudTrail to have CloudWatch Log", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Controls]" + ] + }, + "trailWithCWLogsWithoutMetrics": { + "category": "O", + "^description": "No metrics found in CloudWatch Log Group. CIS recommends that you create relevant metric filters and alarms for CloudTrail's log that store in CloudWatch Log", + "shortDesc": "CloudTrail's Log to have log metrics", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Controls]" + ] + }, + "SetRetentionDays":{ + "category": "C", + "^description": "No retention day set in CloudWatch Log Groups. 
It incurs charges based on storage size", + "shortDesc": "Set retention days", + "criticality": "H", + "downtime": 0, + "slowness": 0, + "additionalCost": 0, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Controls]" + ] + }, + "CISRetentionAtLeast1Yr":{ + "category": "O", + "^description": "CloudWatch Logs centralize logs from all of your systems, applications, and AWS services in a single, highly scalable service. You can use CloudWatch Logs to monitor, store, and access your log files from Amazon Elastic Compute Cloud (EC2) instances, AWS CloudTrail, Amazon Route 53, and other sources. Retaining your logs for at least 1 year can help you comply with log retention standards.", + "shortDesc": "To have at least 365 days retention", + "criticality": "M", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[CIS Cloudwatch Guide 16]" + ] + } +} \ No newline at end of file diff --git a/services/cloudwatch/drivers/CloudwatchCommon.py b/services/cloudwatch/drivers/CloudwatchCommon.py new file mode 100644 index 0000000..9bc3ac8 --- /dev/null +++ b/services/cloudwatch/drivers/CloudwatchCommon.py @@ -0,0 +1,35 @@ +import boto3 +import botocore +import constants as _C + +from services.Evaluator import Evaluator + +###### TO DO ##### +## Import modules that needed for this driver +## Example +## from services.ec2.drivers.Ec2SecGroup import Ec2SecGroup + +###### TO DO ##### +## Replace ServiceDriver with + +class CloudwatchCommon(Evaluator): + + ###### TO DO ##### + ## Replace resource variable to meaningful name + ## Modify based on your need + def __init__(self, log, logClient): + super().__init__() + self.init() + + self.log = log + self.logClient = logClient + return + + ###### TO DO ##### + ## Change the method name to meaningful name + ## Check methods name must follow _check[Description] + def _checkRetention(self): + if self.log['retentionInDays'] == -1: + self.results['SetRetentionDays'] = [-1, "{} 
MB".format(self.log['storedBytes']/1024/1024)] + elif self.log['retentionInDays'] <= 365: + self.results['CISRetentionAtLeast1Yr'] = [-1, self.log['retentionInDays']] \ No newline at end of file diff --git a/services/cloudwatch/drivers/CloudwatchTrails.py b/services/cloudwatch/drivers/CloudwatchTrails.py new file mode 100644 index 0000000..8d71969 --- /dev/null +++ b/services/cloudwatch/drivers/CloudwatchTrails.py @@ -0,0 +1,238 @@ +import boto3 +import botocore +import constants as _C + +from services.Evaluator import Evaluator +from utils.Config import Config +import re + +###### TO DO ##### +## Import modules that needed for this driver +## Example +## from services.ec2.drivers.Ec2SecGroup import Ec2SecGroup + +###### TO DO ##### +## Replace ServiceDriver with + +class CloudwatchTrails(Evaluator): + ## WOMA = without metrics & alarm + ## "$.userIdentity.type", "=", "Root" + ## ==> \$.userIdentity.type\s*=\s*[\'\"]*Root[\'\"']* + CISMetricsMap = [ + {'trailWOMAroot1': [ + ["$.userIdentity.type", "=", "Root"] + ] + }, + {'trailWOMAunauthAPI2': [ + ["$.errorCode", "=", "\*UnauthorizedOperation"], + ["$.errorCode", "=", "AccessDenied\*"] + ] + }, + {'trailWOMAnoMFA3': [ + ["$.eventName", "=", "ConsoleLogin"], + ["$.additionalEventData.MFAUsed", "!=", "Yes"], + ["$.userIdentity.type", "=", "IAMUser"], + ["$.responseElements.ConsoleLogin", "=", "Success"] + ] + }, + {'trailWOMAalarm4': [ + ["$.eventSource", "=", "iam.amazonaws.com"], + ["$.eventName", "=", "DeleteGroupPolicy"], + ["$.eventName", "=", "DeleteRolePolicy"], + ["$.eventName", "=", "DeleteUserPolicy"], + ["$.eventName", "=", "PutGroupPolicy"], + ["$.eventName", "=", "PutRolePolicy"], + ["$.eventName", "=", "PutUserPolicy"], + ["$.eventName", "=", "CreatePolicy"], + ["$.eventName", "=", "DeletePolicy"], + ["$.eventName", "=", "CreatePolicyVersion"], + ["$.eventName", "=", "DeletePolicyVersion"], + ["$.eventName", "=", "AttachRolePolicy"], + ["$.eventName", "=", "DetachRolePolicy"], + ["$.eventName", "=", 
"AttachUserPolicy"], + ["$.eventName", "=", "DetachUserPolicy"], + ["$.eventName", "=", "AttachGroupPolicy"], + ["$.eventName", "=", "DetachGroupPolicy"] + ] + }, + {'trailWOMATrail5': [ + ["$.eventName", "=", "CreateTrail"], + ["$.eventName", "=", "UpdateTrail"], + ["$.eventName", "=", "DeleteTrail"], + ["$.eventName", "=", "StartLogging"], + ["$.eventName", "=", "StopLogging"], + ] + }, + {'trailWOMAAuthFail6': [ + ["$.eventName", "=", "ConsoleLogin"], + ["$.errorMessage", "=", "Failed authentication"] + ] + }, + {'trailWOMACMK7': [ + ["$.eventSource", "=", "kms.amazonaws.com"], + ["$.eventName", "=", "DisableKey"], + ["$.eventName", "=", "ScheduleKeyDeletion"] + ] + }, + {'trailWOMAS3Policy8': [ + ["$.eventSource", "=", "s3.amazonaws.com"], + ["$.eventName", "=", "PutBucketAcl"], + ["$.eventName", "=", "PutBucketPolicy"], + ["$.eventName", "=", "PutBucketCors"], + ["$.eventName", "=", "PutBucketLifecycle"], + ["$.eventName", "=", "PutBucketReplication"], + ["$.eventName", "=", "DeleteBucketPolicy"], + ["$.eventName", "=", "DeleteBucketCors"], + ["$.eventName", "=", "DeleteBucketLifecycle"], + ["$.eventName", "=", "DeleteBucketReplication"] + ] + }, #9-14 + {'trailWOMAConfig9': [ + ["$.eventSource", "=", "config.amazonaws.com"], + ["$.eventName", "=", "StopConfigurationRecorder"], + ["$.eventName", "=", "DeleteDeliveryChannel"], + ["$.eventName", "=", "PutDeliveryChannel"], + ["$.eventName", "=", "PutConfigurationRecorder"] + ] + }, + {'trailWOMASecGroup10': [ + ["$.eventName", "=", "AuthorizeSecurityGroupIngress"], + ["$.eventName", "=", "AuthorizeSecurityGroupEgress"], + ["$.eventName", "=", "RevokeSecurityGroupIngress"], + ["$.eventName", "=", "RevokeSecurityGroupEgress"], + ["$.eventName", "=", "CreateSecurityGroup"], + ["$.eventName", "=", "DeleteSecurityGroup"] + ] + }, + {'trailWOMANACL11': [ + ["$.eventName", "=", "CreateNetworkAcl"], + ["$.eventName", "=", "CreateNetworkAclEntry"], + ["$.eventName", "=", "DeleteNetworkAcl"], + ["$.eventName", "=", 
"DeleteNetworkAclEntry"], + ["$.eventName", "=", "ReplaceNetworkAclEntry"], + ["$.eventName", "=", "ReplaceNetworkAclAssociation"] + ] + }, + {'trailWOMAGateway12': [ + ["$.eventName", "=", "CreateCustomerGateway"], + ["$.eventName", "=", "DeleteCustomerGateway"], + ["$.eventName", "=", "AttachInternetGateway"], + ["$.eventName", "=", "CreateInternetGateway"], + ["$.eventName", "=", "DeleteInternetGateway"], + ["$.eventName", "=", "DetachInternetGateway"] + ] + }, + {'trailWOMARouteTable13': [ + ["$.eventSource", "=", "ec2.amazonaws.com"], + ["$.eventName", "=", "CreateRoute"], + ["$.eventName", "=", "CreateRouteTable"], + ["$.eventName", "=", "ReplaceRoute"], + ["$.eventName", "=", "ReplaceRouteTableAssociation"], + ["$.eventName", "=", "DeleteRouteTable"], + ["$.eventName", "=", "DeleteRoute"], + ["$.eventName", "=", "DisassociateRouteTable"] + ] + }, + {'trailWOMAVPC14': [ + ["$.eventName", "=", "CreateVpc"], + ["$.eventName", "=", "DeleteVpc"], + ["$.eventName", "=", "ModifyVpcAttribute"], + ["$.eventName", "=", "AcceptVpcPeeringConnection"], + ["$.eventName", "=", "CreateVpcPeeringConnection"], + ["$.eventName", "=", "DeleteVpcPeeringConnection"], + ["$.eventName", "=", "RejectVpcPeeringConnection"], + ["$.eventName", "=", "AttachClassicLinkVpc"], + ["$.eventName", "=", "DetachClassicLinkVpc"], + ["$.eventName", "=", "DisableVpcClassicLink"], + ["$.eventName", "=", "EnableVpcClassicLink"] + ] + } + ] + + CISMetricsMapRegex = {} + logMetricsFilterPattern = [] + + def __init__(self, log, logname, logClient): + super().__init__() + self.init() + + self.logClient = logClient + self.log = log + self.logname = logname + + self.metricsInfo = [] + + self.CISMetricsMapRegex = Config.get('Logs::CISMetricsMapRegex', {}) + if len(self.CISMetricsMapRegex) == 0: + for lists in self.CISMetricsMap: + for check, rules in lists.items(): + self.CISMetricsMapRegex[check] = self.regexBuilder(rules) + + Config.set('Logs::CISMetricsMapRegex', self.CISMetricsMapRegex) + + return + + 
def regexBuilder(self, rules): + regexPatterns = [] + for rule in rules: + regexPattern = "\\" + rule[0] + "\s*\\" + rule[1] + "\s*[\\'\\\"]*" + rule[2] + "[\\'\\\"]*" + regexPatterns.append(regexPattern) + + return regexPatterns + + # Loop available cloudwatch log metrics in logGroup + def getAllMetrics(self, nextToken=None): + args = {"logGroupName": self.log[2]} + if nextToken: + args['nextToken'] = nextToken + + resp = self.logClient.describe_metric_filters(**args) + metricFilters = resp.get('metricFilters') + for filters in metricFilters: + self.logMetricsFilterPattern.append(filters['filterPattern']) + + if resp.get('nextToken'): + self.getAllMetrics(resp.get('nextToken')) + + # write a function to loop through all regex pattern in self.CISMetricsMapRegex, and regex checks again array of string in self.logMetricsFilterPattern + def regexFindCISPatterns(self): + for check, rules in self.CISMetricsMapRegex.items(): + self.results[check] = [-1, None] + for pattern in self.logMetricsFilterPattern: + # print("-=-=-=-=- " + check + "=-=-=-=-=-=-") + cnt = 0 + for rule in rules: + # print('**REGEX**::', rule, pattern) + # print(re.search(rule, pattern)) + if re.search(rule, pattern): + cnt = cnt + 1 + # self.results[check] = [-1, None] + # break + + if len(rules) == cnt: + del self.results[check] + break + + return + + ###### TO DO ##### + ## Change the method name to meaningful name + ## Check methods name must follow _check[Description] + def _checkHasLogMetrics(self): + if self.log[1] == None: + self.results['trailWithoutCWLogs'] = [-1, None] + return + + args = {"logGroupNamePrefix": self.log[2]} + + resp = self.logClient.describe_log_groups(**args) + logDetail = resp.get('logGroups')[0] + + if logDetail['metricFilterCount'] == 0: + self.results['trailWithCWLogsWithoutMetrics'] = [-1, None] + return + + self.getAllMetrics() + self.regexFindCISPatterns() + + return \ No newline at end of file diff --git a/services/efs/drivers/EfsDriver.py 
b/services/efs/drivers/EfsDriver.py index dc3a859..2e4944a 100644 --- a/services/efs/drivers/EfsDriver.py +++ b/services/efs/drivers/EfsDriver.py @@ -34,4 +34,4 @@ def __check_backup_policy(self): ) if backup['BackupPolicy']['Status'] == 'DISABLED': - self.results['AutomatedBackup'] = [-1, 'Disabled'] + self.results['AutomatedBackup'] = [-1, 'Disabled'] \ No newline at end of file diff --git a/services/iam/Iam.py b/services/iam/Iam.py index 5265b08..2ed8b03 100644 --- a/services/iam/Iam.py +++ b/services/iam/Iam.py @@ -132,7 +132,9 @@ def getUsers(self): def advise(self): objs = {} - + users = {} + roles = {} + ''' users = self.getUsers() if self.getUserFlag == False: return objs @@ -163,9 +165,9 @@ def advise(self): objs['Group::' + group['GroupName']] = obj.getInfo() del obj - + ''' print('... (IAM:Account) inspecting') - obj = IamAccount(None, self.awsClients, users, roles) + obj = IamAccount(None, self.awsClients, users, roles, self.ssBoto) obj.run(self.__class__) objs['Account::Config'] = obj.getInfo() diff --git a/services/iam/drivers/IamAccount.py b/services/iam/drivers/IamAccount.py index aefcd00..c77cdc0 100644 --- a/services/iam/drivers/IamAccount.py +++ b/services/iam/drivers/IamAccount.py @@ -13,9 +13,10 @@ class IamAccount(IamCommon): PASSWORD_POLICY_MIN_SCORE = 4 ROOT_LOGIN_MAX_COUNT = 3 - def __init__(self, none, awsClients, users, roles): + def __init__(self, none, awsClients, users, roles, ssBoto): super().__init__() + self.ssBoto = ssBoto self.iamClient = awsClients['iamClient'] self.accClient = awsClients['accClient'] self.sppClient = awsClients['sppClient'] @@ -45,11 +46,11 @@ def passwordPolicyScoring(self, policies): score += 1 self.results['passwordPolicyReuse'] = [-1, value] continue - + if policy == 'MaxPasswordAge' and value <= 90: score += 1 continue - + if policy == 'PasswordReusePrevention' and value >= 24: score += 1 self.results['passwordPolicyReuse'] = [-1, value] @@ -253,3 +254,34 @@ def _checkCURReport(self): print(e) return + + 
def _checkConfigEnabled(self): + ssBoto = self.ssBoto + regions = Config.get("REGIONS_SELECTED") + + results = {} + badResults = [] + cnt = 0 + for region in regions: + if region == 'GLOBAL': + continue + + conf = bConfig(region_name = region) + cfg = ssBoto.client('config', config=conf) + + resp = cfg.describe_configuration_recorders() + recorders = resp.get('ConfigurationRecorders') + r = 1 + if len(recorders) == 0: + r = 0 + badResults.append(region) + + cnt = cnt + r + results[region] = r + + if cnt == 0: + self.results['EnableConfigService'] = [-1, None] + elif cnt < len(regions): + self.results['PartialEnableConfigService'] = [-1, ', '.join(badResults)] + else: + return \ No newline at end of file diff --git a/services/iam/iam.reporter.json b/services/iam/iam.reporter.json index 9965c25..34f98f9 100644 --- a/services/iam/iam.reporter.json +++ b/services/iam/iam.reporter.json @@ -143,6 +143,19 @@ "[IAM Password Policy]" ] }, + "passwordPolicyWeak": { + "category": "S", + "^description": "Your current password policy is not strong. Improving the strength of your password policy would improve the security of your account. Consider implementing best practices when setting the password policy. If you have already configured it as per your organization's requirements, you may disregard this finding.", + "shortDesc": "Set a stronger password policy", + "criticality": "L", + "downtime": 0, + "slowness": 0, + "additionalCost": 0, + "needFullTest": 0, + "ref": [ + "[IAM Password Policy]" + ] + }, "InlinePolicy": { "category": "O", "^description": "You have set an inline policy for {$COUNT} IAM users, groups or roles. An inline policy is a policy that's embedded in an IAM identity (a user, group, or role). In most cases, we recommend that you use managed policies instead of inline policies. This is because managed policies have several additional features such as reusability, central change management, versioning and rolling back, delegating permissions management and automatic updates.
Inline policies are useful if you want to maintain a strict one-to-one relationship between a policy and the identity that it's applied to. For example, you want to be sure that the permissions in a policy are not inadvertently assigned to an identity other than the one they're intended for.", @@ -461,5 +474,31 @@ "ref": [ "[Creating Cost and Usage Reports]" ] + }, + "EnableConfigService":{ + "category": "S", + "^description": "AWS Config is not enabled. The AWS Config service performs configuration management of supported AWS resources in your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items, and any configuration changes between resources.", + "shortDesc": "Enable AWS Config", + "criticality": "H", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[Enable AWS Config]" + ] + }, + "PartialEnableConfigService":{ + "category": "S", + "^description": "Not all regions have Config enabled. The AWS Config service performs configuration management of supported AWS resources in your account and delivers log files to you.
The recorded information includes the configuration item (AWS resource), relationships between configuration items, and any configuration changes between resources.", + "shortDesc": "Enable AWS Config", + "criticality": "L", + "downtime": 0, + "slowness": 0, + "additionalCost": 1, + "needFullTest": 0, + "ref": [ + "[Enable AWS Config]" + ] } } \ No newline at end of file diff --git a/services/lambda_/drivers/LambdaCommon.py b/services/lambda_/drivers/LambdaCommon.py index b496d0f..5fce0d7 100644 --- a/services/lambda_/drivers/LambdaCommon.py +++ b/services/lambda_/drivers/LambdaCommon.py @@ -72,6 +72,12 @@ def get_invocation_count(self, day): return result['SampleCount'] + def _check_architectures_is_arm(self): + if 'arm64' in self.lambda_['Architectures']: + return + + self.results['UseArmArchitecture'] = [-1, ', '.join(self.lambda_['Architectures'])] + def _check_function_url_in_used(self): url_config = self.lambda_client.list_function_url_configs( FunctionName=self.function_name diff --git a/services/lambda_/lambda.reporter.json b/services/lambda_/lambda.reporter.json index 86bf339..a5ac450 100644 --- a/services/lambda_/lambda.reporter.json +++ b/services/lambda_/lambda.reporter.json @@ -180,5 +180,18 @@ "ref": [ "[Using resource-based policies for Lambda]" ] + }, + "UseArmArchitecture":{ + "category": "P", + "^description": "{$COUNT} of your Lambda function(s) are not using arm64 architecture. Lambda functions that use arm64 architecture (AWS Graviton2 processor) can achieve significantly better price and performance than the equivalent function running on x86_64 architecture. 
Consider using arm64 for compute-intensive applications such as high-performance computing, video encoding, and simulation workloads.", + "downtime": 0, + "slowness": 0, + "additionalCost": 0, + "needFullTest": 1, + "criticality": "M", + "shortDesc": "Use Arm64 Architecture", + "ref": [ + "[Advantages of using arm64]" + ] } } \ No newline at end of file diff --git a/services/rds/Rds.py b/services/rds/Rds.py index 6b4f088..02939ab 100644 --- a/services/rds/Rds.py +++ b/services/rds/Rds.py @@ -1,11 +1,12 @@ import botocore from utils.Config import Config -from utils.Tools import _pr +from utils.Tools import _pr, _warn from services.Service import Service ##import drivers here from services.rds.drivers.RdsCommon import RdsCommon from services.rds.drivers.RdsMysql import RdsMysql +from services.rds.drivers.RdsMariadb import RdsMariadb from services.rds.drivers.RdsMysqlAurora import RdsMysqlAurora from services.rds.drivers.RdsPostgres import RdsPostgres from services.rds.drivers.RdsPostgresAurora import RdsPostgresAurora @@ -28,6 +29,7 @@ def __init__(self, region): self.secrets = [] engineDriver = { + 'mariadb': 'Mariadb', 'mysql': 'Mysql', 'aurora-mysql': 'MysqlAurora', 'postgres': 'Postgres', @@ -141,6 +143,7 @@ def advise(self): engine = 'sqlserver' if engine not in self.engineDriver: + _warn("{}, unsupported RDS Engine: [{}]".format(instance[dbKey], engine)) continue driver_ = self.engineDriver[engine] diff --git a/services/rds/drivers/RdsCommon.py b/services/rds/drivers/RdsCommon.py index 1fd06d1..f5af497 100644 --- a/services/rds/drivers/RdsCommon.py +++ b/services/rds/drivers/RdsCommon.py @@ -64,7 +64,8 @@ def getCAInfo(self): self.certInfo = myCert def showInfo(self): - print("Identifier: " + self.db['DBInstanceIdentifier'] + "\n") + identifier = self.db['DBInstanceIdentifier'] if self.isCluster == False else self.db['DBClusterIdentifier'] + print("Identifier: " + identifier + "\n") _pr(self.results) def getInstInfo(self): @@ -169,7 +170,8 @@ def 
_checkMasterUsername(self): 'aurora-mysql': 'admin', 'postgres': 'postgres', 'aurora-postgresql': 'postgres', - 'sqlserver': 'admin' + 'sqlserver': 'admin', + 'mariadb': 'admin' } if not self.engine in defaultMasterUser: @@ -179,6 +181,10 @@ def _checkMasterUsername(self): if defaultMasterUser[self.engine] == self.db['MasterUsername']: self.results['DefaultMasterAdmin'] = [-1, self.engine + "::" + self.db["MasterUsername"]] + def _checkHasStorageAutoscaling(self): + if not 'MaxAllocatedStorage' in self.db: + self.results['EnableStorageAutoscaling'] = [-1, None] + def _checkHasMultiAZ(self): multiAZ = -1 if self.db['MultiAZ'] == False else 1 self.results['MultiAZ'] = [multiAZ, 'Off' if multiAZ == -1 else 'On'] @@ -231,8 +237,12 @@ def _checkHasEnhancedMonitoring(self): self.results['EnhancedMonitor'] = [flag, 'On' if flag == -1 else 'Off'] def _checkDeleteProtection(self): + key = 'DeleteProtection' + if self.isCluster == True: + key = 'DeleteProtectionCluster' + flag = -1 if self.db['DeletionProtection'] == False else 1 - self.results['DeleteProtection'] = [flag, 'Off' if flag == -1 else 'On'] + self.results[key] = [flag, 'Off' if flag == -1 else 'On'] def _checkIsPublicAccessible(self): if self.isCluster == True: @@ -318,6 +328,32 @@ def _checkIsInstanceLatestGeneration(self): if compressedLists[dbInstFamily] > dbInstGeneration: self.results['LatestInstanceGeneration'] = [-1, self.db['DBInstanceClass']] + def _checkIsOpenSource(self): + if self.isCluster == True: + return + + validEngine = ['mariadb', 'postgres', 'mysql', 'aurora-mysql', 'aurora-postgresql'] + if not self.engine in validEngine: + self.results['ConsiderOpenSource'] = [-1, self.engine] + + def _checkIfAurora(self): + if self.isCluster == True: + return + + validEngine = ['mariadb', 'postgres', 'mysql'] + if self.engine in validEngine: + self.results['ConsiderAurora'] = [-1, self.engine] + + def _checkHasGravitonOption(self): + if self.isCluster == True: + return + + ## valid Graviton List + 
validEngine = ['mariadb', 'postgres', 'mysql', 'aurora-mysql', 'aurora-postgresql'] + if self.engine in validEngine: + if not 'g' in self.instInfo['prefixDetail']['attributes']: + self.results['MoveToGraviton'] = [-1, self.instInfo['prefix']] + def _checkHasPatches(self): if self.isServerless == True: @@ -682,4 +718,4 @@ def _checkFreeMemory(self): if freeMemoryMaxRatio - freeMemoryMinRatio > 0.5: self.results['FreeMemoryDropMT50pctIn24hours'] = [-1, "Max FreeMemory: {}GB, Min FreeMemory {}GB".format(freeMemoryMax, freeMemoryMin)] elif freeMemoryMthMinRatio > 0.60 and freeMemoryMthAvgRatio > 0.60: - self.results['RightSizingMemoryMonthMinMT60pct'] = [-1, "Monthly
Min FreeMemory: {}%, Avg FreeMemory {}%".format(round(freeMemoryMinRatio*100, 1), round(freeMemoryAvgRatio*100, 1))] \ No newline at end of file + self.results['RightSizingMemoryMonthMinMT60pct'] = [-1, "Monthly
Min FreeMemory: {}%, Avg FreeMemory {}%".format(round(freeMemoryMinRatio*100, 1), round(freeMemoryAvgRatio*100, 1))] diff --git a/services/rds/drivers/RdsMariadb.py b/services/rds/drivers/RdsMariadb.py new file mode 100644 index 0000000..846c40f --- /dev/null +++ b/services/rds/drivers/RdsMariadb.py @@ -0,0 +1,6 @@ +from .RdsMysql import RdsMysql + +class RdsMariadb(RdsMysql): + def __init__(self, db, rdsClient, ctClient, cwClient): + super().__init__(db, rdsClient, ctClient, cwClient) + self.loadParameterInfo() diff --git a/services/rds/rds.reporter.json b/services/rds/rds.reporter.json index d09d5a1..2aa1f86 100644 --- a/services/rds/rds.reporter.json +++ b/services/rds/rds.reporter.json @@ -131,7 +131,18 @@ }, "DeleteProtection":{ "category": "O", - "^description": "Deletion Protection is not enabled for {$COUNT} instances and Aurora clusters. This can lead to accidental deletion of Production database. Enable deletion protection to ensure that production databases are not accidentally deleted.", + "^description": "Deletion Protection is not enabled for {$COUNT} instances. This can lead to accidental deletion of Production database. Enable deletion protection to ensure that production databases are not accidentally deleted.", + "downtime": 0, + "slowness": 0, + "additionalCost": 0, + "needFullTest": 0, + "criticality": "H", + "shortDesc": "Enable Delete Protection", + "ref": ["[Enable Delete Protection]"] + }, + "DeleteProtectionCluster":{ + "category": "O", + "^description": "Deletion Protection is not enabled for {$COUNT} Aurora clusters. This can lead to accidental deletion of Production database. 
Enable deletion protection to ensure that production databases are not accidentally deleted.", "downtime": 0, "slowness": 0, "additionalCost": 0, @@ -1009,5 +1020,57 @@ "[RDS - Delete Snapshot]", "[RDS Snapshot retention cost]" ] + }, + "MoveToGraviton":{ + "category": "CP", + "^description": "[Cost Optimisation] You have {$COUNT} RDS/Aurora is not running on graviton instance. Graviton2 provide up to 52% price/performance improvement for RDS open-source databases depending on database engine, version, and workload. Click on the references to learn more about Graviton2 benefits", + "downtime": 1, + "slowness": 0, + "additionalCost": 0, + "needFullTest": 1, + "criticality": "M", + "shortDesc": "Move to graviton", + "ref": [ + "[Key Consideration on RDS Graviton]" + ] + }, + "EnableStorageAutoscaling":{ + "category": "R", + "^description": "[Reliability] You have {$COUNT} RDS/Aurora does not have storage autoscaling enabled. With storage autoscaling enabled, when Amazon RDS detects that you are running out of free database space it automatically scales up your storage. Amazon RDS starts a storage modification for an autoscaling-enabled DB instance when these factors apply: (1) Free available space is less than or equal to 10 percent of the allocated storage. (2) The low-storage condition lasts at least five minutes. (3) At least six hours have passed since the last storage modification, or storage optimization has completed on the instance, whichever is longer.", + "downtime": -1, + "slowness": 0, + "additionalCost": 0, + "needFullTest": 0, + "criticality": "M", + "shortDesc": "Enable storage autoscaling", + "ref": [ + "[Understand RDS Storage auto scaling]" + ] + }, + "ConsiderOpenSource":{ + "category": "T", + "^description": "[Modernization] Consider moving to open-sources.
Modernizing commercial databases on AWS helps lowers costs by eliminating punitive licensing contracts and helps improve quality, efficiency, and performance.", + "downtime": 1, + "slowness": 0, + "additionalCost": 0, + "needFullTest": 1, + "criticality": "I", + "shortDesc": "Consider open-source DB", + "ref": [ + "[DB Modernization Blog]" + ] + }, + "ConsiderAurora":{ + "category": "T", + "^description": "[Modernization] Consider moving to Aurora. Amazon Aurora (Aurora) is a fully managed relational database engine that's compatible with MySQL and PostgreSQL. You already know how MySQL and PostgreSQL combine the speed and reliability of high-end commercial databases with the simplicity and cost-effectiveness of open-source databases. The code, tools, and applications you use today with your existing MySQL and PostgreSQL databases can be used with Aurora. With some workloads, Aurora can deliver up to five times the throughput of MySQL and up to three times the throughput of PostgreSQL without requiring changes to most of your existing applications.", + "downtime": 1, + "slowness": 0, + "additionalCost": 0, + "needFullTest": 1, + "criticality": "I", + "shortDesc": "Consider Aurora", + "ref": [ + "[DB Modernization Blog]" + ] } } \ No newline at end of file diff --git a/services/s3/s3.reporter.json b/services/s3/s3.reporter.json index a4a9c6a..adceef6 100644 --- a/services/s3/s3.reporter.json +++ b/services/s3/s3.reporter.json @@ -46,7 +46,7 @@ "downtime": 0, "slowness": 0, "additionalCost": 0, - "needFullTest": -1, + "needFullTest": 1, "ref": [ "[Block S3 Public Access]" ] diff --git a/utils/ArguParser.py b/utils/ArguParser.py index 03aa867..663259f 100644 --- a/utils/ArguParser.py +++ b/utils/ArguParser.py @@ -23,7 +23,7 @@ class ArguParser: "services": { "required": False, "emptymsg": "Missing --services, using default value: $defaultValue", - "default": "rds,ec2,iam,s3,efs,lambda,guardduty,cloudfront,cloudtrail,elasticache,eks,dynamodb,opensearch,kms", + 
"default": "rds,ec2,iam,s3,efs,lambda,guardduty,cloudfront,cloudtrail,elasticache,eks,dynamodb,opensearch,kms,cloudwatch", "help": "--services ec2,iam" }, "debug": { @@ -68,7 +68,7 @@ class ArguParser: }, "frameworks": { "required": False, - "default": 'MSR,FTR,SSB,WAFS' + "default": 'MSR,FTR,SSB,WAFS,CIS' }, "others":{ "required": False, diff --git a/utils/Config.py b/utils/Config.py index cbec53c..11b723f 100644 --- a/utils/Config.py +++ b/utils/Config.py @@ -4,14 +4,15 @@ import constants as _C class Config: + AWS_SDK = { 'signature_version': 'v4' } ADVISOR = { 'TITLE': 'Service Screener', - 'VERSION': '2.0.1', - 'LAST_UPDATE': '26-Sep-2023' + 'VERSION': '2.1.0', + 'LAST_UPDATE': '17-Apr-2024' } ADMINLTE = { @@ -42,6 +43,8 @@ class Config: 'cloudfrontdist': ['ATTR', 'dist'], 'cloudtrailaccount': ['TEXT', 'General'], 'cloudtrailcommon': ['DICT', 'trail', 'TrailARN'], + 'cloudwatchtrails': ['ATTR', 'logname'], + 'cloudwatchcommon': ['DICT', 'log', 'logGroupName'], 'dynamodbgeneric': ['TEXT', 'General'], 'dynamodbcommon': ['ATTR', 'tablename'], 'ec2autoscaling': ['DICT', 'asg', 'AutoScalingGroupName'], @@ -67,6 +70,7 @@ class Config: 'kmscommon': ['DICT', 'kms', 'Arn'], 'lambdacommon': ['ATTR', 'function_name', ], 'opensearchcommon': ['ATTR', 'domain'], + 'rdsmariadb': ['DICT', 'db', ['DBInstanceIdentifier']], 'rdsmysql': ['DICT', 'db', ['DBInstanceIdentifier']], 'rdsmssql': ['DICT', 'db', ['DBInstanceIdentifier']], 'rdspostgres': ['DICT', 'db', ['DBInstanceIdentifier']], diff --git a/utils/CustomPage/CustomObject.py b/utils/CustomPage/CustomObject.py new file mode 100644 index 0000000..8fad436 --- /dev/null +++ b/utils/CustomPage/CustomObject.py @@ -0,0 +1,42 @@ +from utils.Config import Config +from utils.Tools import _pr +import json + +class CustomObject(): + ResourcesToTrack = {} + ResourcesStat = {} + + ### REDO THIS AS STRUCTURE CHANGE FROM 2 LEVELS to 3 LEVELS + def __init__(self): + for serv, groups in self.ResourcesToTrack.items(): + 
self.ResourcesStat[serv] = {} + for res, rules in groups.items(): + tRules = {'total': 0} + for rule in rules: + tRules[rule] = [] + + self.ResourcesStat[serv][res] = tRules + + s = json.dumps(self.ResourcesStat) + _pr(s) + + def recordItem(self, driver, name, results): + for serv, groups in self.ResourcesToTrack.items(): + if driver in groups: + rules = self.ResourcesToTrack[serv][driver] + + cnt = self.ResourcesStat[serv][driver]['total'] + self.ResourcesStat[serv][driver]['total'] = cnt + 1 + + for rule in rules: + if rule in results and results[rule][0] == -1: + self.ResourcesStat[serv][driver][rule].append(name) + + def printInfo(self, service): + if not service in self.ResourcesStat: + return None + + s = json.dumps(self.ResourcesStat[service]) + _pr(s) + + return s \ No newline at end of file diff --git a/utils/CustomPage/CustomPage.py b/utils/CustomPage/CustomPage.py new file mode 100644 index 0000000..d26f6c9 --- /dev/null +++ b/utils/CustomPage/CustomPage.py @@ -0,0 +1,49 @@ +import os, importlib +import constants as _C +from utils.Config import Config +from utils.Tools import _pr + +class CustomPage(): + Pages = {} + def __init__(self): + self.importCustomObject() + + def importCustomObject(self): + folderPath = 'utils/CustomPage/Pages' + files = os.listdir(folderPath) + + if len(self.Pages) > 0: + return + + for file in files: + if file[-2:] == 'py': + cname, ext = file.split('.') + module = 'utils.CustomPage.Pages.' + cname + sclass = getattr(importlib.import_module(module), cname) + self.Pages[cname] = sclass() + + def trackInfo(self, driver, name, results): + for cname, pObj in self.Pages.items(): + pObj.recordItem(driver, name, results) + + def resetOutput(self, service): + serv = service.lower() + prefix = 'CustomPage.' 
+ for filename in os.listdir(_C.FORK_DIR): + if filename.startswith(prefix) and service.lower() in filename: + file_path = os.path.join(_C.FORK_DIR, filename) + if os.path.isfile(file_path): + os.remove(file_path) + _pr(f"Deleted: {file_path}") + + def writeOutput(self, service): + ## TODO: save that particular service only + serv = service.lower() + for cname, pObj in self.Pages.items(): + s = pObj.printInfo(serv) + if s == None: + return + + filename = _C.FORK_DIR + '/CustomPage.' + cname + '.' + service + '.json' + with open(filename, "w") as f: + f.write(s) \ No newline at end of file diff --git a/utils/CustomPage/Pages/Modernize.py b/utils/CustomPage/Pages/Modernize.py new file mode 100644 index 0000000..8b5e6af --- /dev/null +++ b/utils/CustomPage/Pages/Modernize.py @@ -0,0 +1,26 @@ +from utils.Config import Config +from utils.CustomPage.CustomObject import CustomObject + +class Modernize(CustomObject): + ResourcesToTrack = { + 'rds': { + 'rdsmariadb': {'MoveToGraviton', 'ConsiderAurora'}, + 'rdsmysql': {'MoveToGraviton', 'ConsiderAurora'}, + 'rdsmssql': {'ConsiderOpenSource'}, + 'rdspostgres': {'MoveToGraviton', 'ConsiderAurora'}, + 'rdsmysqlaurora': {}, + 'rdspostgresaurora': {} + }, + 'ec2': { + 'ec2instance': {} + }, + 'eks': { + 'ekscommon': {} + }, + 'lambda': { + 'lambdacommon': {} + } + } + + def __init__(self): + super().__init__() \ No newline at end of file