From cfba5b172bfec69b7fbdff9c5f2cdf88e5bbada1 Mon Sep 17 00:00:00 2001 From: Your Name Date: Fri, 24 Jan 2025 06:41:45 -0500 Subject: [PATCH 1/3] fix multi-line f-strings. fix not parsing all VPCs --- .../lza-upgrade-check.py | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py b/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py index 3c7cd226c..1af04ef36 100644 --- a/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py +++ b/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py @@ -139,8 +139,7 @@ def process_vpc_config(account, vpc, vpc_dict): def flatten_subnet_config(vpc_name, subnets): """Takes subnet object from ASEA config and generate list of subnets to be created per AZ""" return [ - {"Name": f"{subnet['name']}_{vpc_name}_az{d['az']}_net", - "route-table": f"{d['route-table']}_rt"} + {"Name": f"{subnet['name']}_{vpc_name}_az{d['az']}_net", "route-table": f"{d['route-table']}_rt"} for subnet in subnets for d in subnet["definitions"] if not d.get('disabled', False) @@ -278,8 +277,7 @@ def get_transit_gateway_route_tables(ec2_client, tgw_id: str) -> List[Dict]: blackhole_routes = get_transit_gateway_routes( ec2_client, tgwrt["TransitGatewayRouteTableId"], "blackhole") except Exception as e: - logger.error(f"Failed to get routes for table { - tgwrt['TransitGatewayRouteTableId']}: {str(e)}") + logger.error(f"Failed to get routes for table {tgwrt['TransitGatewayRouteTableId']}: {str(e)}") active_routes = [] name = next((tag["Value"] for tag in tgwrt.get("Tags", []) @@ -322,8 +320,7 @@ def get_transit_gateway_routes(ec2_client, tgwrt_id: str, state: str) -> List[Di """ valid_states = ['active', 'blackhole', 'deleted', 'deleting', 'pending'] if state not in valid_states: - raise ValueError(f"Invalid route state. Must be one of: { - ', '.join(valid_states)}") + raise ValueError(f"Invalid route state. Must be one of: {', '.join(valid_states)}") try: response = ec2_client.search_transit_gateway_routes( @@ -376,10 +373,12 @@ def get_vpc_route_tables(ec2_client, vpcId): r = {"Name": name, "RouteTableId": rt["RouteTableId"], "VpcId": rt["VpcId"], + "Main": any([asso["Main"] for asso in rt["Associations"] if "Main" in asso]), "SubnetAssociations": [asso["SubnetId"] for asso in rt["Associations"] if "SubnetId" in asso], "Routes": rt["Routes"], "RawResponse": rt } + rt_list.append(r) return rt_list @@ -474,9 +473,12 @@ def analyze_vpcs(vpc_from_config, account_list, role_to_assume, region): if f"{rt['name']}_rt" == drt["Name"]] if len(crt) == 0: logger.warning( - f"Route table {drt['Name']} exists in VPC {dv} but not in config") - drift["route_tables_not_in_config"].append( - {"RouteTable": drt["Name"], "Vpc": dv}) + f"Route table {drt['Name']} exists in VPC {dv} but not in config. 
{'(Main)' if drt['Main'] else ''}") + + # Do not add to drift if its the main route table and there are no Subnet Associations + if not drt['Main'] or len(drt['SubnetAssociations']) > 0: + drift["route_tables_not_in_config"].append( + {"RouteTable": drt["Name"], "Vpc": dv}) continue # check if all route tables from the config exist in the environment @@ -536,7 +538,7 @@ def analyze_vpcs(vpc_from_config, account_list, role_to_assume, region): vpc_details[dv] = { "Account": account, "RouteTables": d_rtables, "Subnets": d_subnets} - return {"Drift": drift, "VpcDetails": vpc_details} + return {"Drift": drift, "VpcDetails": vpc_details} def get_tgw_from_config(asea_config, region): From 2514a9ae26d9acb7d72e965c1b50fe4979d59378 Mon Sep 17 00:00:00 2001 From: Your Name Date: Fri, 24 Jan 2025 17:02:29 -0500 Subject: [PATCH 2/3] add detection of modified route entries --- .../tools/network-drift-detection/README.md | 1 + .../lza-upgrade-check.py | 88 +++++++++++++++++++ 2 files changed, 89 insertions(+) diff --git a/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/README.md b/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/README.md index df6130be4..80741d2cf 100644 --- a/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/README.md +++ b/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/README.md @@ -74,6 +74,7 @@ This section details drift in subnets and their route tables. Careful inspection |Key|Description|Notes and upgrade impact| |---|-----------|------------------------| +|route_table_entries_mismatches|Difference in route entries between ASEA config and AWS account|Route entries may have been modified manually **the changes will be overwritten during the upgrade**. Note: the script doesn't handle all route target types, manual verification is still recommended| |route_tables_not_deployed|Route tables found in the ASEA config, but not in the AWS account|These route tables may have been manually removed and **will be re-created during the upgrade**| |route_tables_not_in_config|Route tables not found in the ASEA config, but are present in the AWS account|This is for information, these route tables won't be modified during the upgrade. 
See note below.| |subnet_route_table_mismatches|There is a configuration difference between the ASEA config and the current state of the route table|These route tables may have been manually modified, **the changes will be overwritten during the upgrade**| diff --git a/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py b/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py index 1af04ef36..f54f28b32 100644 --- a/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py +++ b/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py @@ -9,6 +9,8 @@ import boto3 from botocore.exceptions import ClientError +if "LOGLEVEL" in os.environ: + logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING"), format='%(levelname)s:%(message)s') logger = logging.getLogger(__name__) @@ -446,6 +448,7 @@ def analyze_vpcs(vpc_from_config, account_list, role_to_assume, region): "subnets_not_deployed": [], "subnets_not_associated": [], "subnet_route_table_mismatches": [], + "route_table_entries_mismatches": [] } vpc_details = {} @@ -492,6 +495,16 @@ def analyze_vpcs(vpc_from_config, account_list, role_to_assume, region): drift["route_tables_not_deployed"].append( {"RouteTable": crt['name'], "Vpc": dv}) continue + elif len(drt) > 0 : + if len(drt) > 1: + logger.error( + f"More than one route table named {crt['name']} is deployed! LZA upgrade already executed?") + + # matching config and deployed route, compare the entries + rteDrift = compare_route_table(crt, drt[0]) + if len(rteDrift) > 0: + drift["route_table_entries_mismatches"].append( + {"RouteTable": crt['name'], "Vpc": dv, "Entries": rteDrift}) # check if there are more subnets than in the config d_subnets = get_vpc_subnets(client, deployed_vpcs[dv]) @@ -540,6 +553,81 @@ def analyze_vpcs(vpc_from_config, account_list, role_to_assume, region): return {"Drift": drift, "VpcDetails": vpc_details} +def compare_route_table(crt, drt): + """ + Compare entries of configured and deployed route table + crt: configured route table in ASEA config + drt: deployed route table in AWS VPC + """ + drift = [] + + #ignoring gateway endpoint routes (S3 and DynamoDB) and local subnet routes + cRoutes = [r for r in crt.get('routes', []) if r['target'].lower() != 's3' and r['target'].lower() != 'dynamodb'] + dRoutes = [r for r in drt.get('Routes', []) if 'DestinationCidrBlock' in r and r.get("GatewayId", "") != "local"] + + if len(cRoutes) != len(dRoutes): + logger.warning(f"Different number of routes in config and deployed route table for {crt['name']}") + + #check if all route entries in config matches what is deployed + for cr in cRoutes: + if cr['target'].lower() == "pcx": + logger.warning(f"Route {cr['destination']} is a VPC peering route. 
Skipping check") + continue + + dr = [r for r in dRoutes if cr['destination'] == r['DestinationCidrBlock']] + if len(dr) == 0: + logger.warning(f"Route {cr['destination']} exists in config but not found in deployed route table") + drift.append({"Route": cr['destination'], "Reason": "Not found in deployed route table"}) + continue + elif len(dr) == 1: + dre = dr[0] + if cr['target'] == "IGW": + if not ("GatewayId" in dre and dre['GatewayId'].startswith("igw-")): + logger.warning(f"Route {cr['destination']} not matched to IGW") + drift.append({"Route": cr['destination'], "Reason": "Not matched to IGW"}) + elif cr['target'] == "TGW": + if not "TransitGatewayId" in dre: + logger.warning(f"Route {cr['destination']} not matched to TGW") + drift.append({"Route": cr['destination'], "Reason": "Not matched to TGW"}) + elif cr['target'].startswith("NFW_"): + if not ("GatewayId" in dre and dre['GatewayId'].startswith("vpce-")): + logger.warning(f"Route {cr['destination']} not matched to NFW VPCE") + drift.append({"Route": cr['destination'], "Reason": "Not matched to NFW VPCE"}) + elif cr['target'].startswith("NATGW_"): + if not "NatGatewayId" in dre: + logger.warning(f"Route {cr['destination']} not matched to NATGW") + drift.append({"Route": cr['destination'], "Reason": "Not matched to NATGW"}) + elif cr['target'] == "VGW": + if not ("GatewayId" in dre and dre['GatewayId'].startswith("vgw-")): + logger.warning(f"Route {cr['destination']} not matched to VGW") + drift.append({"Route": cr['destination'], "Reason": "Not matched to VGW"}) + elif cr['target'].lower() == "firewall": + if not "InstanceId" in dre: + logger.warning(f"Route {cr['destination']} not matched to firewall instance") + drift.append({"Route": cr['destination'], "Reason": "Not matched to firewall instance"}) + else: + logger.error(f"Route target {cr['target']} is not supported!") + drift.append({"Route": cr['destination'], "Reason": f"Route target {cr['target']} is not supported!"}) + else: + #this should not be possible! + logger.error(f"More than one route with destination {cr['destination']} is deployed!") + drift.append({"Route": cr['destination'], "Reason": f"More than one route with destination {cr['destination']} found"}) + + #check if there are route entries deployed that are not in the config + for dr in dRoutes: + if 'VpcPeeringConnectionId' in dr: + logger.warning(f"Route {dr['DestinationCidrBlock']} is a VPC peering route. 
Skipping check") + continue + + cr = [r for r in cRoutes if r['destination'] == dr['DestinationCidrBlock']] + if len(cr) == 0: + logger.warning(f"Route {dr['DestinationCidrBlock']} exists in deployed route table but not found in config") + drift.append({"Route": dr['DestinationCidrBlock'], "Reason": "Not found in config"}) + + return drift + + + def get_tgw_from_config(asea_config, region): """ From 53262c08dc8a52939ba701673f1c7bfaec3a276b Mon Sep 17 00:00:00 2001 From: Your Name Date: Mon, 27 Jan 2025 14:55:45 -0500 Subject: [PATCH 3/3] linting --- .../tools/network-drift-detection/README.md | 2 +- .../lza-upgrade-check.py | 100 +++++++++++------- 2 files changed, 62 insertions(+), 40 deletions(-) diff --git a/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/README.md b/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/README.md index 80741d2cf..0fce7e2b5 100644 --- a/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/README.md +++ b/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/README.md @@ -74,7 +74,7 @@ This section details drift in subnets and their route tables. Careful inspection |Key|Description|Notes and upgrade impact| |---|-----------|------------------------| -|route_table_entries_mismatches|Difference in route entries between ASEA config and AWS account|Route entries may have been modified manually **the changes will be overwritten during the upgrade**. Note: the script doesn't handle all route target types, manual verification is still recommended| +|route_table_entries_mismatches|Difference in route entries between ASEA config and AWS account|Route entries may have been modified manually, **the changes will be overwritten during the upgrade**. Note: the script doesn't handle all route target types, manual verification is still recommended| |route_tables_not_deployed|Route tables found in the ASEA config, but not in the AWS account|These route tables may have been manually removed and **will be re-created during the upgrade**| |route_tables_not_in_config|Route tables not found in the ASEA config, but are present in the AWS account|This is for information, these route tables won't be modified during the upgrade. 
See note below.| |subnet_route_table_mismatches|There is a configuration difference between the ASEA config and the current state of the route table|These route tables may have been manually modified, **the changes will be overwritten during the upgrade**| diff --git a/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py b/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py index f54f28b32..be4b54dc7 100644 --- a/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py +++ b/reference-artifacts/Custom-Scripts/lza-upgrade/tools/network-drift-detection/lza-upgrade-check.py @@ -10,7 +10,8 @@ from botocore.exceptions import ClientError if "LOGLEVEL" in os.environ: - logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING"), format='%(levelname)s:%(message)s') + logging.basicConfig(level=os.environ.get( + "LOGLEVEL", "WARNING"), format='%(levelname)s:%(message)s') logger = logging.getLogger(__name__) @@ -141,7 +142,7 @@ def process_vpc_config(account, vpc, vpc_dict): def flatten_subnet_config(vpc_name, subnets): """Takes subnet object from ASEA config and generate list of subnets to be created per AZ""" return [ - {"Name": f"{subnet['name']}_{vpc_name}_az{d['az']}_net", "route-table": f"{d['route-table']}_rt"} + {"Name": f"{subnet['name']}_{vpc_name}_az{d['az']}_net", "route-table": f"{d['route-table']}_rt"} # nopep8 for subnet in subnets for d in subnet["definitions"] if not d.get('disabled', False) @@ -279,7 +280,7 @@ def get_transit_gateway_route_tables(ec2_client, tgw_id: str) -> List[Dict]: blackhole_routes = get_transit_gateway_routes( ec2_client, tgwrt["TransitGatewayRouteTableId"], "blackhole") except Exception as e: - logger.error(f"Failed to get routes for table {tgwrt['TransitGatewayRouteTableId']}: {str(e)}") + logger.error(f"Failed to get routes for table {tgwrt['TransitGatewayRouteTableId']}: {str(e)}") # nopep8 active_routes = [] name = next((tag["Value"] for tag in tgwrt.get("Tags", []) @@ -322,7 +323,7 @@ def get_transit_gateway_routes(ec2_client, tgwrt_id: str, state: str) -> List[Di """ valid_states = ['active', 'blackhole', 'deleted', 'deleting', 'pending'] if state not in valid_states: - raise ValueError(f"Invalid route state. Must be one of: {', '.join(valid_states)}") + raise ValueError(f"Invalid route state. Must be one of: {', '.join(valid_states)}") # nopep8 try: response = ec2_client.search_transit_gateway_routes( @@ -495,7 +496,7 @@ def analyze_vpcs(vpc_from_config, account_list, role_to_assume, region): drift["route_tables_not_deployed"].append( {"RouteTable": crt['name'], "Vpc": dv}) continue - elif len(drt) > 0 : + elif len(drt) > 0: if len(drt) > 1: logger.error( f"More than one route table named {crt['name']} is deployed! 
LZA upgrade already executed?") @@ -553,6 +554,7 @@ def analyze_vpcs(vpc_from_config, account_list, role_to_assume, region): return {"Drift": drift, "VpcDetails": vpc_details} + def compare_route_table(crt, drt): """ Compare entries of configured and deployed route table @@ -561,74 +563,95 @@ def compare_route_table(crt, drt): """ drift = [] - #ignoring gateway endpoint routes (S3 and DynamoDB) and local subnet routes - cRoutes = [r for r in crt.get('routes', []) if r['target'].lower() != 's3' and r['target'].lower() != 'dynamodb'] - dRoutes = [r for r in drt.get('Routes', []) if 'DestinationCidrBlock' in r and r.get("GatewayId", "") != "local"] + # ignoring gateway endpoint routes (S3 and DynamoDB) and local subnet routes + cRoutes = [r for r in crt.get('routes', []) if r['target'].lower( + ) != 's3' and r['target'].lower() != 'dynamodb'] + dRoutes = [r for r in drt.get( + 'Routes', []) if 'DestinationCidrBlock' in r and r.get("GatewayId", "") != "local"] if len(cRoutes) != len(dRoutes): - logger.warning(f"Different number of routes in config and deployed route table for {crt['name']}") + logger.warning( + f"Different number of routes in config and deployed route table for {crt['name']}") - #check if all route entries in config matches what is deployed + # check if all route entries in config matches what is deployed for cr in cRoutes: if cr['target'].lower() == "pcx": - logger.warning(f"Route {cr['destination']} is a VPC peering route. Skipping check") + logger.warning( + f"Route {cr['destination']} is a VPC peering route. Skipping check") continue - dr = [r for r in dRoutes if cr['destination'] == r['DestinationCidrBlock']] + dr = [r for r in dRoutes if cr['destination'] + == r['DestinationCidrBlock']] if len(dr) == 0: - logger.warning(f"Route {cr['destination']} exists in config but not found in deployed route table") - drift.append({"Route": cr['destination'], "Reason": "Not found in deployed route table"}) + logger.warning(f"Route {cr['destination']} exists in config but not found in deployed route table") # nopep8 + drift.append( + {"Route": cr['destination'], "Reason": "Not found in deployed route table"}) continue elif len(dr) == 1: dre = dr[0] if cr['target'] == "IGW": if not ("GatewayId" in dre and dre['GatewayId'].startswith("igw-")): - logger.warning(f"Route {cr['destination']} not matched to IGW") - drift.append({"Route": cr['destination'], "Reason": "Not matched to IGW"}) + logger.warning( + f"Route {cr['destination']} not matched to IGW") + drift.append( + {"Route": cr['destination'], "Reason": "Not matched to IGW"}) elif cr['target'] == "TGW": if not "TransitGatewayId" in dre: - logger.warning(f"Route {cr['destination']} not matched to TGW") - drift.append({"Route": cr['destination'], "Reason": "Not matched to TGW"}) + logger.warning( + f"Route {cr['destination']} not matched to TGW") + drift.append( + {"Route": cr['destination'], "Reason": "Not matched to TGW"}) elif cr['target'].startswith("NFW_"): if not ("GatewayId" in dre and dre['GatewayId'].startswith("vpce-")): - logger.warning(f"Route {cr['destination']} not matched to NFW VPCE") - drift.append({"Route": cr['destination'], "Reason": "Not matched to NFW VPCE"}) + logger.warning( + f"Route {cr['destination']} not matched to NFW VPCE") + drift.append( + {"Route": cr['destination'], "Reason": "Not matched to NFW VPCE"}) elif cr['target'].startswith("NATGW_"): if not "NatGatewayId" in dre: - logger.warning(f"Route {cr['destination']} not matched to NATGW") - drift.append({"Route": cr['destination'], "Reason": "Not 
matched to NATGW"}) + logger.warning( + f"Route {cr['destination']} not matched to NATGW") + drift.append( + {"Route": cr['destination'], "Reason": "Not matched to NATGW"}) elif cr['target'] == "VGW": if not ("GatewayId" in dre and dre['GatewayId'].startswith("vgw-")): - logger.warning(f"Route {cr['destination']} not matched to VGW") - drift.append({"Route": cr['destination'], "Reason": "Not matched to VGW"}) + logger.warning( + f"Route {cr['destination']} not matched to VGW") + drift.append( + {"Route": cr['destination'], "Reason": "Not matched to VGW"}) elif cr['target'].lower() == "firewall": if not "InstanceId" in dre: - logger.warning(f"Route {cr['destination']} not matched to firewall instance") - drift.append({"Route": cr['destination'], "Reason": "Not matched to firewall instance"}) + logger.warning( + f"Route {cr['destination']} not matched to firewall instance") + drift.append( + {"Route": cr['destination'], "Reason": "Not matched to firewall instance"}) else: logger.error(f"Route target {cr['target']} is not supported!") - drift.append({"Route": cr['destination'], "Reason": f"Route target {cr['target']} is not supported!"}) + drift.append({"Route": cr['destination'], "Reason": f"Route target { + cr['target']} is not supported!"}) else: - #this should not be possible! - logger.error(f"More than one route with destination {cr['destination']} is deployed!") - drift.append({"Route": cr['destination'], "Reason": f"More than one route with destination {cr['destination']} found"}) + # this should not be possible! + logger.error(f"More than one route with destination {cr['destination']} is deployed!") # nopep8 + drift.append({"Route": cr['destination'], "Reason": f"More than one route with destination { + cr['destination']} found"}) - #check if there are route entries deployed that are not in the config + # check if there are route entries deployed that are not in the config for dr in dRoutes: if 'VpcPeeringConnectionId' in dr: - logger.warning(f"Route {dr['DestinationCidrBlock']} is a VPC peering route. Skipping check") + logger.warning( + f"Route {dr['DestinationCidrBlock']} is a VPC peering route. Skipping check") continue - cr = [r for r in cRoutes if r['destination'] == dr['DestinationCidrBlock']] + cr = [r for r in cRoutes if r['destination'] + == dr['DestinationCidrBlock']] if len(cr) == 0: - logger.warning(f"Route {dr['DestinationCidrBlock']} exists in deployed route table but not found in config") - drift.append({"Route": dr['DestinationCidrBlock'], "Reason": "Not found in config"}) + logger.warning(f"Route {dr['DestinationCidrBlock']} exists in deployed route table but not found in config") # nopep8 + drift.append( + {"Route": dr['DestinationCidrBlock'], "Reason": "Not found in config"}) return drift - - def get_tgw_from_config(asea_config, region): """ Get all Transit Gateways defined in the config for the provided region @@ -787,8 +810,7 @@ def main(): accel_prefix = args.accel_prefix asea_config_path = args.raw_config_path output_path = args.output_dir - role_to_assume = args.role_to_assume if args.role_to_assume else f"{ - accel_prefix}-PipelineRole" + role_to_assume = args.role_to_assume if args.role_to_assume else f"{accel_prefix}-PipelineRole" # nopep8 parameter_table = f"{accel_prefix}-Parameters" shared_network_key = 'shared-network' home_region = args.home_region
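
For reviewers, a minimal sketch of how an entry under the new `route_table_entries_mismatches` drift key is shaped once `compare_route_table` reports differences. The key names and `Reason` strings come from the patched script; the route table name, VPC name, and CIDRs are hypothetical placeholders, not output from a real environment.

```python
# Illustrative only: approximate shape of the new "route_table_entries_mismatches"
# output appended by analyze_vpcs(). All names and CIDRs below are made up.
example_drift = {
    "route_table_entries_mismatches": [
        {
            "RouteTable": "Public_Shared-Network_rt",  # hypothetical route table name
            "Vpc": "Shared-Network_vpc",               # hypothetical VPC name
            "Entries": [
                # Reason strings as emitted by compare_route_table()
                {"Route": "0.0.0.0/0", "Reason": "Not matched to IGW"},
                {"Route": "192.168.0.0/16", "Reason": "Not found in config"},
            ],
        }
    ]
}
```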