From d4c544972694246605d68946e6bf43f133ecece2 Mon Sep 17 00:00:00 2001 From: Raphael Fuchs Date: Tue, 22 Mar 2022 18:07:08 +0100 Subject: [PATCH 1/4] Add management of SecurityHub findings Workflow Status transitions upon change of compliance of AWS Config Rules --- Code/lambda_function.py | 93 ++++++++++++++++++++++++++++++++--------- 1 file changed, 73 insertions(+), 20 deletions(-) diff --git a/Code/lambda_function.py b/Code/lambda_function.py index 6598ce1..adb16a4 100644 --- a/Code/lambda_function.py +++ b/Code/lambda_function.py @@ -1,7 +1,11 @@ """Script to integrate Config with Security Hub.""" +import logging import boto3 import hashlib +logger = logging.getLogger() +logger.setLevel(logging.INFO) + SECURITYHUB = boto3.client('securityhub') CONFIG = boto3.client('config') @@ -19,19 +23,67 @@ def get_description_of_rule(config_rule_name): description = response['ConfigRules'][0]['ConfigRuleName'] return description except Exception as error: - print("Error: ", error) + logger.error(f"Exception: {error}") raise - + def get_compliance_and_severity(new_status): """Return compliance status.""" status = ['FAILED', 3.0, 30] if new_status == 'COMPLIANT': status = ['PASSED', 0, 0] elif new_status == 'NOT_APPLICABLE': - status = ['NOT_AVAILABLE', 0, 0] + # we set it to PASSED as opposed to NOT_AVAILABLE. Setting to NOT_AVAILABLE triggers a + # transition of the WorkflowStatus to NEW. Something we do not want since NOT_APPLICABLE + # in AWS Config means the resource has been deleted + # see https://docs.aws.amazon.com/config/latest/APIReference/API_DescribeComplianceByResource.html. 
+ status = ['PASSED', 0, 0] return status +def get_workflow_status(compliance_status): + if compliance_status == 'PASSED' or compliance_status == 'NOT_AVAILABLE': + updated_status = 'RESOLVED' + else: + updated_status = 'NEW' + return updated_status + + +def batch_import_findings(new_findings): + try: + response = SECURITYHUB.batch_import_findings(Findings=new_findings) + if response['FailedCount'] > 0: + logger.info(f"Failed to import {response['FailedCount']} findings") + return response + except Exception as error: + logger.error(f"Exception: {error} ") + raise + + +def batch_update_findings(finding_id, updated_status, event_details): + try: + response = SECURITYHUB.batch_update_findings( + FindingIdentifiers=[ + { + 'Id': finding_id, + 'ProductArn': (f"arn:aws:securityhub:{event_details['awsRegion']}:" + f"{event_details['awsAccountId']}:" + f"product/{event_details['awsAccountId']}/default") + }, + ], + Workflow={ + 'Status': updated_status + }, + ) + if response['ProcessedFindings']: + logger.info(f"BatchUpdateFindings id='{finding_id}' workflow_status='{updated_status}' response='{response}'") + else: + logger.error(f"Failed to update finding worflow status: {response}" ) + + return response + except Exception as error: + logger.error(f"Exception: {error}") + raise + def map_config_findings_to_sh(event, old_recorded_time): """Create custom finding.""" @@ -44,17 +96,19 @@ def map_config_findings_to_sh(event, old_recorded_time): remediation_url = (f"https://console.aws.amazon.com/config/home?region={event_details['awsRegion']}#/rules/details?configRuleName={config_rule_name}") finding_hash = hashlib.sha256(f"{event_details['configRuleARN']}-{event_details['resourceId']}".encode()).hexdigest() finding_id = (f"arn:aws:securityhub:{event_details['awsRegion']}:{event_details['awsAccountId']}:config/rules/{config_rule_name}/finding/{finding_hash}") + updated_status = get_workflow_status(compliance_status[0]) + new_findings.append({ "SchemaVersion": "2018-10-08", 
"Id": finding_id, "ProductArn": (f"arn:aws:securityhub:{event_details['awsRegion']}:" - f"{event_details['awsAccountId']}:" - f"product/{event_details['awsAccountId']}/default"), + f"{event_details['awsAccountId']}:" + f"product/{event_details['awsAccountId']}/default"), "GeneratorId": event_details['configRuleARN'], "AwsAccountId": event_details['awsAccountId'], 'ProductFields': { - 'ProviderName': 'AWS Config' - }, + 'ProviderName': 'AWS Config' + }, "Types": [ "Software and Configuration Checks/AWS Config Analysis" ], @@ -81,19 +135,18 @@ def map_config_findings_to_sh(event, old_recorded_time): 'Region': event_details['awsRegion'] } ], - 'Compliance': {'Status': compliance_status[0]} + 'Compliance': {'Status': compliance_status[0]}, + 'Workflow': {'Status': updated_status} }) - + if new_findings: - try: - response = SECURITYHUB.batch_import_findings(Findings=new_findings) - if response['FailedCount'] > 0: - print( - "Failed to import {} findings".format( - response['FailedCount'])) - except Exception as error: - print("Error: ", error) - raise + #batch_import_findings allows to set ComplianceStatus + response = batch_import_findings(new_findings) + logger.info(f"BatchImportFindings id='{finding_id}' compliance_status='{compliance_status[0]}' configRuleComplianceType='{new_status}' workflow_status='{updated_status}' description='{description}'") + + if 'oldEvaluationResult' in event_details: + #batch_update_findings allows to set WorkflowStatus + response = batch_update_findings(finding_id, updated_status, event_details) def parse_message(event): @@ -106,10 +159,10 @@ def parse_message(event): old_recorded_time = (event_details['oldEvaluationResult']['resultRecordedTime']) map_config_findings_to_sh(event, old_recorded_time) else: - print("Other Notification") + logger.info("Other Notification") def lambda_handler(event, context): """Begin Lambda execution.""" - print("Event Before Parsing: ", event) + logger.info(f"Event Before Parsing: {event}") 
parse_message(event) \ No newline at end of file From 274744aa294281b785c70a13cd3fc289b49b7a68 Mon Sep 17 00:00:00 2001 From: Raphael Fuchs Date: Thu, 24 Mar 2022 15:00:32 +0100 Subject: [PATCH 2/4] Add required permissions for BatchUpdateFindings --- Template/cloudformation_template.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Template/cloudformation_template.yaml b/Template/cloudformation_template.yaml index c4eca13..b4ac5a3 100644 --- a/Template/cloudformation_template.yaml +++ b/Template/cloudformation_template.yaml @@ -30,6 +30,8 @@ Resources: - Effect: Allow Action: - 'securityhub:BatchImportFindings' + - 'securityhub:BatchUpdateFindings' + - 'securityhub:GetFindings' Resource: - '*' - Effect: Allow From 6651cf68ec74ebf422335c3ef42d203a04b19309 Mon Sep 17 00:00:00 2001 From: Raphael Fuchs Date: Thu, 24 Mar 2022 15:04:16 +0100 Subject: [PATCH 3/4] Add waiting time before execution of BatchUpdateFindings and retries of BatchUpdateFindings upon failures (asynchronous handling of BatchImportFindings may lead to concurrency issues) --- Code/lambda_function.py | 43 +++++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/Code/lambda_function.py b/Code/lambda_function.py index adb16a4..84de4b0 100644 --- a/Code/lambda_function.py +++ b/Code/lambda_function.py @@ -2,6 +2,7 @@ import logging import boto3 import hashlib +import time logger = logging.getLogger() logger.setLevel(logging.INFO) @@ -61,24 +62,32 @@ def batch_import_findings(new_findings): def batch_update_findings(finding_id, updated_status, event_details): try: - response = SECURITYHUB.batch_update_findings( - FindingIdentifiers=[ - { - 'Id': finding_id, - 'ProductArn': (f"arn:aws:securityhub:{event_details['awsRegion']}:" - f"{event_details['awsAccountId']}:" - f"product/{event_details['awsAccountId']}/default") + i = 0 + while i < 3: + i += 1 + time.sleep(1) #some wait time is recommended since previous call to BatchUpdateFindings is handled 
asynchronuously + response = SECURITYHUB.batch_update_findings( + FindingIdentifiers=[ + { + 'Id': finding_id, + 'ProductArn': (f"arn:aws:securityhub:{event_details['awsRegion']}:" + f"{event_details['awsAccountId']}:" + f"product/{event_details['awsAccountId']}/default") + }, + ], + Workflow={ + 'Status': updated_status }, - ], - Workflow={ - 'Status': updated_status - }, - ) - if response['ProcessedFindings']: - logger.info(f"BatchUpdateFindings id='{finding_id}' workflow_status='{updated_status}' response='{response}'") - else: - logger.error(f"Failed to update finding worflow status: {response}" ) - + ) + if response['ProcessedFindings']: + logger.info(f"BatchUpdateFindings id='{finding_id}' workflow_status='{updated_status}' response='{response}'") + break + else: + if i == 2: + logger.error(f"Failed (final tentative) to update finding worflow status: {response}" ) + else: + logger.error(f"Failed to update (try number='{i}') finding worflow status: '{response}', retrying..." ) + return response except Exception as error: logger.error(f"Exception: {error}") From 8bbcaffc0c137d08f3ac9315ce97fe69177a9243 Mon Sep 17 00:00:00 2001 From: Raphael Fuchs Date: Fri, 25 Mar 2022 08:58:51 +0100 Subject: [PATCH 4/4] minor improvements from peer review --- Code/lambda_function.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/Code/lambda_function.py b/Code/lambda_function.py index 84de4b0..9c60790 100644 --- a/Code/lambda_function.py +++ b/Code/lambda_function.py @@ -9,6 +9,7 @@ SECURITYHUB = boto3.client('securityhub') CONFIG = boto3.client('config') +MAX_RETRIES = 3 def get_description_of_rule(config_rule_name): @@ -62,10 +63,8 @@ def batch_import_findings(new_findings): def batch_update_findings(finding_id, updated_status, event_details): try: - i = 0 - while i < 3: - i += 1 - time.sleep(1) #some wait time is recommended since previous call to BatchUpdateFindings is handled asynchronuously + for i in range(MAX_RETRIES): + 
response = SECURITYHUB.batch_update_findings( FindingIdentifiers=[ { @@ -79,15 +78,15 @@ def batch_update_findings(finding_id, updated_status, event_details): 'Status': updated_status }, ) - if response['ProcessedFindings']: + + #Retry if UnprocessedFindings exist + if response['UnprocessedFindings']: + logger.error(f"Failed to update (try number='{i}'/{MAX_RETRIES-1}) finding worflow status: '{response}'" ) + time.sleep(1) + else: logger.info(f"BatchUpdateFindings id='{finding_id}' workflow_status='{updated_status}' response='{response}'") break - else: - if i == 2: - logger.error(f"Failed (final tentative) to update finding worflow status: {response}" ) - else: - logger.error(f"Failed to update (try number='{i}') finding worflow status: '{response}', retrying..." ) - + return response except Exception as error: logger.error(f"Exception: {error}")