diff --git a/.gitignore b/.gitignore index fa6c748..a0c26db 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ src/cfn_environment_base.egg-info/ ami_cache.* config.* +instancetype_to_arch.* .DS_Store .idea/ .eggs diff --git a/src/environmentbase/data/ami_cache.json b/src/environmentbase/data/ami_cache.json index 4573c9e..195968c 100644 --- a/src/environmentbase/data/ami_cache.json +++ b/src/environmentbase/data/ami_cache.json @@ -1,53 +1,44 @@ { - "us-east-1": { - "natAmiId": "ami-4f9fee26", - "ubuntu1404LtsAmiId": "ami-018c9568", - "amazonLinuxAmiId": "ami-0d4cfd66", - "centOS65AmiId": "ami-8997afe0" - }, - "us-west-1": { - "natAmiId": "ami-7850793d", - "ubuntu1404LtsAmiId": "ami-ee4f77ab", - "amazonLinuxAmiId": "ami-87ea13c3", - "centOS65AmiId": "ami-1a013c5f" - }, - "us-west-2": { - "natAmiId": "ami-69ae8259", - "ubuntu1404LtsAmiId": "ami-6ac2a85a", - "amazonLinuxAmiId": "ami-d5c5d1e5", - "centOS65AmiId": "ami-b6bdde86" - }, - "eu-central-1": { - "amazonLinuxAmiId": "ami-a6b0b7bb" - }, - "eu-west-1": { - "natAmiId": "ami-ed352799", - "ubuntu1404LtsAmiId": "ami-896c96fe", - "amazonLinuxAmiId": "ami-e4d18e93", - "centOS65AmiId": "ami-42718735" - }, - "ap-southeast-1": { - "natAmiId": "ami-780a432a", - "ubuntu1404LtsAmiId": "ami-9a7724c8", - "amazonLinuxAmiId": "ami-68d8e93a", - "centOS65AmiId": "ami-a08fd9f2" - }, - "ap-southeast-2": { - "natAmiId": "ami-0154c73b", - "ubuntu1404LtsAmiId": "ami-43128a79", - "amazonLinuxAmiId": "ami-db7b39e1", - "centOS65AmiId": "ami-e7138ddd" - }, - "ap-northeast-1": { - "natAmiId": "ami-5f840e5e", - "ubuntu1404LtsAmiId": "ami-bddaa2bc", - "amazonLinuxAmiId": "ami-1c1b9f1c", - "centOS65AmiId": "ami-81294380" - }, - "sa-east-1": { - "natAmiId": "ami-7660c56b", - "ubuntu1404LtsAmiId": "ami-7315b76e", - "amazonLinuxAmiId": "ami-55098148", - "centOS65AmiId": "ami-7d02a260" + "amazonLinuxAmiId": { + "ap-northeast-1": { "HVM64": "ami-393f1a57", "PV64": "ami-393f1a57" }, + "ap-northeast-2": { "HVM64": "ami-249b554a" }, + "ap-southeast-1": { "HVM64": "ami-34bd7a57", "PV64": "ami-34bd7a57" }, + "ap-southeast-2": { "HVM64": "ami-97d58af4", "PV64": "ami-97d58af4" }, + "eu-central-1": { "HVM64": "ami-794a5915", "PV64": "ami-794a5915" }, + "eu-west-1": { "HVM64": "ami-10e43b63", "PV64": "ami-10e43b63" }, + "sa-east-1": { "HVM64": "ami-d412aab8", "PV64": "ami-d412aab8" }, + "us-east-1": { "HVM64": "ami-60b6c60a", "PV64": "ami-60b6c60a" }, + "us-west-1": { "HVM64": "ami-f7e48897", "PV64": "ami-f7e48897" }, + "us-west-2": { "HVM64": "ami-b03420d1", "PV64": "ami-b03420d1" } + }, + "natAmiId": { + "ap-northeast-1": {"HVM64": "ami-5f840e5e"}, + "ap-southeast-1": {"HVM64": "ami-780a432a"}, + "ap-southeast-2": {"HVM64": "ami-0154c73b"}, + "eu-west-1": {"HVM64": "ami-ed352799"}, + "sa-east-1": {"HVM64": "ami-7660c56b"}, + "us-east-1": {"HVM64": "ami-4f9fee26"}, + "us-west-1": {"HVM64": "ami-7850793d"}, + "us-west-2": {"HVM64": "ami-69ae8259"} + }, + "centOS65AmiId": { + "ap-northeast-1": {"HVM64": "ami-81294380"}, + "ap-southeast-1": {"HVM64": "ami-a08fd9f2"}, + "ap-southeast-2": {"HVM64": "ami-e7138ddd"}, + "eu-west-1": {"HVM64": "ami-42718735"}, + "sa-east-1": {"HVM64": "ami-7d02a260"}, + "us-east-1": {"HVM64": "ami-8997afe0"}, + "us-west-1": {"HVM64": "ami-1a013c5f"}, + "us-west-2": {"HVM64": "ami-b6bdde86"} + }, + "ubuntu1404LtsAmiId": { + "ap-northeast-1": {"HVM64": "ami-bddaa2bc"}, + "ap-southeast-1": {"HVM64": "ami-9a7724c8"}, + "ap-southeast-2": {"HVM64": "ami-43128a79"}, + "eu-west-1": {"HVM64": "ami-896c96fe"}, + "sa-east-1": {"HVM64": "ami-7315b76e"}, + 
"us-east-1": {"HVM64": "ami-018c9568"}, + "us-west-1": {"HVM64": "ami-ee4f77ab"}, + "us-west-2": {"HVM64": "ami-6ac2a85a"} } -} \ No newline at end of file +} diff --git a/src/environmentbase/data/config.json b/src/environmentbase/data/config.json index 7605a60..cc4efa6 100644 --- a/src/environmentbase/data/config.json +++ b/src/environmentbase/data/config.json @@ -4,22 +4,9 @@ "environment_name": "environmentbase", "monitor_stack": false, "write_stack_outputs": false, - "stack_outputs_directory": "stack_outputs", - "valid_regions": [ - "ap-northeast-1", - "ap-northeast-2", - "ap-southeast-1", - "ap-southeast-2", - "eu-central-1", - "eu-west-1", - "sa-east-1", - "us-east-1", - "us-west-1", - "us-west-2" - ] + "stack_outputs_directory": "stack_outputs" }, "template": { - "ami_map_file": "ami_cache.json", "description": "Environment Generator Base deployment", "s3_bucket": "dualspark", "s3_prefix": "templates", @@ -39,68 +26,7 @@ "aws_access_key_id": null, "aws_secret_access_key": null }, - "network": { - "network_cidr_base": "10.0.0.0", - "network_cidr_size": "16", - "az_count": 3, - "subnet_types": [ - "public", - "private" - ], - "subnet_config": [ - { - "type": "public", - "size": "18", - "name": "public" - }, - { - "type": "private", - "size": "22", - "name": "private" - }, - ], - }, - "nat": { - "instance_type": "t2.micro", - "enable_ntp": false - }, - "instancetype_to_arch": { - "c1.medium": "PV64", "c1.xlarge": "PV64", "c3.2xlarge": "HVM64", "c3.4xlarge": "HVM64", - "c3.8xlarge": "HVM64", "c3.large": "HVM64", "c3.xlarge": "HVM64", "c4.2xlarge": "HVM64", - "c4.4xlarge": "HVM64", "c4.8xlarge": "HVM64", "c4.large": "HVM64", "c4.xlarge": "HVM64", - "cc2.8xlarge": "PV64", "cg1.4xlarge": "PV64", "cr1.8xlarge": "PV64", "d2.2xlarge": "HVM64", - "d2.4xlarge": "HVM64", "d2.8xlarge": "HVM64", "d2.xlarge": "HVM64", "g2.2xlarge": "HVMG2", - "g2.8xlarge": "HVM64", "hi1.4xlarge": "PV64", "hs1.8xlarge": "PV64", "i2.2xlarge": "HVM64", - "i2.4xlarge": "HVM64", "i2.8xlarge": "HVM64", "i2.xlarge": "HVM64", "m1.large": "PV64", - "m1.medium": "PV64", "m1.small": "PV64", "m1.xlarge": "PV64", "m2.2xlarge": "PV64", - "m2.4xlarge": "PV64", "m2.xlarge": "PV64", "m3.2xlarge": "HVM64", "m3.large": "HVM64", - "m3.medium": "HVM64", "m3.xlarge": "HVM64", "m4.10xlarge": "HVM64", "m4.2xlarge": "HVM64", - "m4.4xlarge": "HVM64", "m4.large": "HVM64", "m4.xlarge": "HVM64", "r3.2xlarge": "HVM64", - "r3.4xlarge": "HVM64", "r3.8xlarge": "HVM64", "r3.large": "HVM64", "r3.xlarge": "HVM64", - "t1.micro": "PV64", "t2.large": "HVM64", "t2.medium": "HVM64", "t2.micro": "HVM64", - "t2.nano": "HVM64", "t2.small": "HVM64" - }, - "image_map": { - "amazonLinuxAmiId": { - "ap-northeast-1": { "HVM64": "ami-cbf90ecb", "PV64": "ami-27f90e27" }, - "ap-southeast-1": { "HVM64": "ami-68d8e93a", "PV64": "ami-acd9e8fe" }, - "ap-southeast-2": { "HVM64": "ami-fd9cecc7", "PV64": "ami-ff9cecc5" }, - "eu-central-1": { "HVM64": "ami-a8221fb5", "PV64": "ami-ac221fb1" }, - "eu-west-1": { "HVM64": "ami-a10897d6", "PV64": "ami-bf0897c8" }, - "sa-east-1": { "HVM64": "ami-b52890a8", "PV64": "ami-bb2890a6" }, - "us-east-1": { "HVM64": "ami-1ecae776", "PV64": "ami-1ccae774" }, - "us-west-1": { "HVM64": "ami-d114f295", "PV64": "ami-d514f291" }, - "us-west-2": { "HVM64": "ami-e7527ed7", "PV64": "ami-ff527ecf" } - }, - "natAmiId": { - "ap-northeast-1": {"HVM64": "ami-5f840e5e"}, - "ap-southeast-1": {"HVM64": "ami-780a432a"}, - "ap-southeast-2": {"HVM64": "ami-0154c73b"}, - "eu-west-1": {"HVM64": "ami-ed352799"}, - "sa-east-1": {"HVM64": "ami-7660c56b"}, - 
"us-east-1": {"HVM64": "ami-4f9fee26"}, - "us-west-1": {"HVM64": "ami-7850793d"}, - "us-west-2": {"HVM64": "ami-69ae8259"} - } - } + "valid_regions": !include region_map.json, + "instancetype_to_arch": !include instancetype_to_arch.json, + "image_map": !include ami_cache.json } diff --git a/src/environmentbase/data/config_schema.json b/src/environmentbase/data/config_schema.json index 27b32d3..42177de 100644 --- a/src/environmentbase/data/config_schema.json +++ b/src/environmentbase/data/config_schema.json @@ -7,12 +7,9 @@ # Enable monitoring of stack event stream "monitor_stack": "bool", # Directory for writing stack outputs - "stack_outputs_directory": "basestring", - "valid_regions": "list" + "stack_outputs_directory": "basestring" }, "template": { - # Name of json file containing mapping labels to AMI ids - "ami_map_file": "basestring", # Canned ACL permissions for the template file upload # Legal values: private, public-read, public-read-write, authenticated-read, bucket-owner-read, bucket-owner-full-control "s3_upload_acl": "basestring", @@ -38,5 +35,6 @@ }, "instancetype_to_arch": { "*": "basestring" - } + }, + "valid_regions": "list" } diff --git a/src/environmentbase/data/instancetype_to_arch.json b/src/environmentbase/data/instancetype_to_arch.json new file mode 100644 index 0000000..4509935 --- /dev/null +++ b/src/environmentbase/data/instancetype_to_arch.json @@ -0,0 +1,23 @@ +{ + # Previous gen instance types (paravirtualized) + "c1.medium": "PV64", "c1.xlarge": "PV64", + "cc2.8xlarge": "PV64", + "cg1.4xlarge": "PV64", + "cr1.8xlarge": "PV64", + "hi1.4xlarge": "PV64", + "hs1.8xlarge": "PV64", + "m1.large": "PV64", "m1.medium": "PV64", "m1.small": "PV64", "m1.xlarge": "PV64", + "m2.2xlarge": "PV64", "m2.4xlarge": "PV64", "m2.xlarge": "PV64", + "t1.micro": "PV64", + + # Current gen instance types (hardware virtualized) + "c3.2xlarge": "HVM64", "c3.4xlarge": "HVM64", "c3.8xlarge": "HVM64", "c3.large": "HVM64", "c3.xlarge": "HVM64", "c4.2xlarge": "HVM64", + "c4.4xlarge": "HVM64", "c4.8xlarge": "HVM64", "c4.large": "HVM64", "c4.xlarge": "HVM64", + "d2.2xlarge": "HVM64", "d2.4xlarge": "HVM64", "d2.8xlarge": "HVM64", "d2.xlarge": "HVM64", + "g2.2xlarge": "HVMG2", "g2.8xlarge": "HVM64", + "i2.2xlarge": "HVM64", "i2.4xlarge": "HVM64", "i2.8xlarge": "HVM64", "i2.xlarge": "HVM64", + "m3.2xlarge": "HVM64", "m3.large": "HVM64", "m3.medium": "HVM64", "m3.xlarge": "HVM64", + "m4.10xlarge": "HVM64", "m4.2xlarge": "HVM64", "m4.4xlarge": "HVM64", "m4.large": "HVM64", "m4.xlarge": "HVM64", + "r3.2xlarge": "HVM64", "r3.4xlarge": "HVM64", "r3.8xlarge": "HVM64", "r3.large": "HVM64", "r3.xlarge": "HVM64", + "t2.large": "HVM64", "t2.medium": "HVM64", "t2.micro": "HVM64", "t2.nano": "HVM64", "t2.small": "HVM64" +} diff --git a/src/environmentbase/data/region_map.json b/src/environmentbase/data/region_map.json new file mode 100644 index 0000000..767cd4e --- /dev/null +++ b/src/environmentbase/data/region_map.json @@ -0,0 +1,12 @@ +[ + "ap-northeast-1", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "eu-central-1", + "eu-west-1", + "sa-east-1", + "us-east-1", + "us-west-1", + "us-west-2" +] \ No newline at end of file diff --git a/src/environmentbase/environmentbase.py b/src/environmentbase/environmentbase.py index 9147d3d..5a59c93 100644 --- a/src/environmentbase/environmentbase.py +++ b/src/environmentbase/environmentbase.py @@ -40,7 +40,7 @@ class EnvironmentBase(object): def __init__(self, view=None, env_config=EnvConfig(), - config_filename=(res.DEFAULT_CONFIG_FILENAME + 
res.EXTENSIONS[0]), + config_filename=res.R.CONFIG_FILENAME, config_file_override=None): """ Init method for environment base creates all common objects for a given environment within the CloudFormation @@ -94,8 +94,8 @@ def create_hook(self): def add_config_hook(self): """ - Override in your subclass for adding custom config handlers. - Called after the other config handlers have been added. + Override in your subclass for adding custom config handlers. + Called after the other config handlers have been added. After the hook completes the view is loaded and started. """ pass @@ -141,14 +141,15 @@ def stack_event_hook(self, event_data): """ return True - def init_action(self): + def init_action(self, is_silent=False): """ Default init_action invoked by the CLI Generates config and ami_cache files Override in your subclass for custom initialization steps + @param is_silent [boolean], suppress console output (for testing) """ - self.generate_config() - self.generate_ami_cache() + config_handlers = self.env_config.config_handlers + res.R.generate_config(prompt=True, is_silent=is_silent, output_filename=self.config_filename, config_handlers=config_handlers) def s3_prefix(self): """ @@ -400,7 +401,7 @@ def _validate_config_helper(self, schema, config, path): # ------------ value check ----------- if isinstance(req_value, basestring): - req_type = res.get_type(req_value) + req_type = utility.get_type(req_value) if not isinstance(config[matching_key], req_type): message = "Type mismatch in config, %s should be of type %s, not %s" % \ @@ -430,7 +431,7 @@ def _validate_region(self, config): """ Checks boto.region_name against the list of valid regions raising an exception if not. """ - valid_regions = config['global']['valid_regions'] + valid_regions = config['valid_regions'] region_name = config['boto']['region_name'] if region_name not in valid_regions: raise ValidationError('Unrecognized region name: ' + region_name) @@ -447,12 +448,16 @@ def _validate_region(self, config): error_msg = "Too many availability zones requested: network.az_count=%s but '%s' has only %s." % (az_count, region_name, actual_az_count) raise ValidationError(error_msg) - def _validate_config(self, config, factory_schema=res.CONFIG_REQUIREMENTS): + def _validate_config(self, config, factory_schema=None): """ Compares provided dict against TEMPLATE_REQUIREMENTS. Checks that all required sections and values are present and that the required types match. Throws ValidationError if not valid. :param config: dict to be validated """ + + if not factory_schema: + factory_schema = res.R.parse_file(res.Res.CONFIG_REQUIREMENTS_FILENAME, from_file=False) + config_reqs_copy = copy.deepcopy(factory_schema) # Merge in any requirements provided by config handlers @@ -535,29 +540,6 @@ def _config_env_override(config, path, print_debug=False): else: EnvironmentBase._config_env_override(config[key], new_path, print_debug=print_debug) - def generate_config(self): - """ - Generate config dictionary from defaults - Add defaults from all registered config handlers (added patterns, etc.) - Write file to self.config_filename - """ - - if os.path.isfile(self.config_filename): - overwrite = raw_input("%s already exists. Overwrite?
(y/n) " % self.config_filename).lower() - print - if not overwrite == 'y': - return - - config = copy.deepcopy(res.FACTORY_DEFAULT_CONFIG) - - # Merge in any defaults provided by registered config handlers - for handler in self._config_handlers: - config.update(handler.get_factory_defaults()) - - with open(self.config_filename, 'w') as f: - f.write(json.dumps(config, indent=4, sort_keys=True, separators=(',', ': '))) - print 'Generated config file at %s\n' % self.config_filename - def load_config(self, view=None, config=None): """ Load config from self.config_filename, break if it doesn't exist @@ -574,7 +556,7 @@ def load_config(self, view=None, config=None): # Else read from file else: - config = res.load_file('', self.config_filename) + config = res.R.load_config(self.config_filename) # Load in cli config overrides view.update_config(config) @@ -598,7 +580,6 @@ def load_config(self, view=None, config=None): self.stack_monitor = monitor.StackMonitor(self.globals['environment_name']) self.stack_monitor.add_handler(self) - def initialize_template(self): """ Create new Template instance, set description and common parameters and load AMI cache. @@ -636,22 +617,6 @@ def initialize_template(self): self.template.add_utility_bucket(name=bucket_name) self.template.add_output(Output('utilityBucket', Value=bucket_name)) - def generate_ami_cache(self): - """ - Generate ami_cache.json file from defaults - """ - ami_cache_filename = res.DEFAULT_AMI_CACHE_FILENAME + res.EXTENSIONS[0] - - if os.path.isfile(ami_cache_filename): - overwrite = raw_input("%s already exists. Overwrite? (y/n) " % ami_cache_filename).lower() - print - if not overwrite == 'y': - return - - with open(ami_cache_filename, 'w') as f: - f.write(json.dumps(res.FACTORY_DEFAULT_AMI_CACHE, indent=4, separators=(',', ': '))) - print "Generated AMI cache file at %s\n" % ami_cache_filename - def to_json(self): """ Centralized method for outputting the root template with a timestamp identifying when it @@ -685,7 +650,6 @@ def add_child_template(self, child_template, merge=False, depends_on=[]): """ return self.template.add_child_template(child_template, merge=merge, depends_on=depends_on) - def write_stack_outputs_to_file(self, event_data): """ Given the stack event data, determine if the stack has finished executing (CREATE_COMPLETE or UPDATE_COMPLETE) @@ -695,7 +659,6 @@ def write_stack_outputs_to_file(self, event_data): (event_data['status'] == 'CREATE_COMPLETE' or event_data['status'] == 'UPDATE_COMPLETE'): self.write_stack_output_to_file(stack_id=event_data['id'], stack_name=event_data['name']) - def write_stack_output_to_file(self, stack_id, stack_name): """ Given a CFN stack's physical resource ID, query the stack for its outputs @@ -720,7 +683,6 @@ def write_stack_output_to_file(self, stack_id, stack_name): if self.globals['print_debug']: print "Outputs for {0} written to {1}\n".format(stack_name, stack_output_filename) - def get_stack_output(self, stack_id, output_name): """ Given the PhysicalResourceId of a Stack and a specific output key, return the output value @@ -738,14 +700,12 @@ def stack_event_hook(self, event_data): # If the output wasn't found in the stack, raise an exception raise Exception("%s did not output %s" % (stack_obj.stack_name, output_name)) - def get_cfn_stack_obj(self, stack_id): """ Given the unique physical stack ID, return exactly one cloudformation stack object """ return self.get_cfn_connection().describe_stacks(stack_id)[0] - def get_cfn_connection(self): """ We persist the CFN connection so that we 
don't create a new session with each request @@ -754,7 +714,6 @@ def get_cfn_connection(self): self.cfn_connection = cloudformation.connect_to_region(self.config.get('boto').get('region_name')) return self.cfn_connection - def get_sts_credentials(self, role_session_name, role_arn): """ We persist the STS credentials so that we don't create a new session with each request @@ -767,4 +726,3 @@ ) self.sts_credentials = assumed_role.credentials return self.sts_credentials - diff --git a/src/environmentbase/patterns/base_network.py b/src/environmentbase/patterns/base_network.py index b779026..2ef94d8 100644 --- a/src/environmentbase/patterns/base_network.py +++ b/src/environmentbase/patterns/base_network.py @@ -35,8 +35,13 @@ class BaseNetwork(Template): ], }, "nat": { - "instance_type": "t2.micro", - "enable_ntp": False + "enable_ntp": False, + "default_instance_type": "t2.micro", + "suggested_instance_types": [ + "m1.small", "t2.micro", "t2.small", "t2.medium", + "m3.medium", + "c3.large", "c3.2xlarge" + ] } } @@ -49,7 +54,8 @@ class BaseNetwork(Template): "network_cidr_size": "basestring" }, "nat": { - "instance_type": "basestring", + "default_instance_type": "basestring", + "suggested_instance_types": "list", "enable_ntp": "bool" } } @@ -241,12 +247,14 @@ def create_subnet_egress(self, subnet_az, route_table, igw_title, subnet_type, s if self.az_nat_mapping.get(subnet_az): return - nat_instance_type = nat_config['instance_type'] + nat_default_instance_type = nat_config['default_instance_type'] + nat_suggested_instance_types = nat_config['suggested_instance_types'] nat_enable_ntp = nat_config['enable_ntp'] extra_user_data = nat_config.get('extra_user_data') ha_nat = self.create_nat( subnet_az, - nat_instance_type, + nat_default_instance_type, + nat_suggested_instance_types, nat_enable_ntp, name='HaNat' + str(subnet_az), extra_user_data=extra_user_data) @@ -263,15 +271,16 @@ def gateway_hook(self): """ pass - def create_nat(self, index, nat_instance_type, enable_ntp, name, extra_user_data=None): + def create_nat(self, index, default_instance_type, suggested_instance_types, enable_ntp, name, extra_user_data=None): """ Override to customize your NAT instance. The returned object must be a subclass of ha_nat.HaNat. """ return ha_nat.HaNat( index, - nat_instance_type, + default_instance_type, enable_ntp, + suggested_instance_types=suggested_instance_types, name=name, extra_user_data=extra_user_data) diff --git a/src/environmentbase/patterns/bastion.py b/src/environmentbase/patterns/bastion.py index 9438f10..b4c08dd 100644 --- a/src/environmentbase/patterns/bastion.py +++ b/src/environmentbase/patterns/bastion.py @@ -9,11 +9,24 @@ class Bastion(Template): Adds a bastion host within a given deployment based on environmentbase. """ - def __init__(self, name='bastion', ingress_port='2222', access_cidr='0.0.0.0/0', instance_type='t2.micro', user_data=None): + SUGGESTED_INSTANCE_TYPES = [ + "m1.small", "t2.micro", "t2.small", "t2.medium", + "m3.medium", + "c3.large", "c3.2xlarge" + ] + + def __init__(self, + name='bastion', + ingress_port='2222', + access_cidr='0.0.0.0/0', + default_instance_type='t2.micro', + suggested_instance_types=SUGGESTED_INSTANCE_TYPES, + user_data=None): """ Method initializes bastion host in a given environment deployment @param name [string] - name of the tier to assign - @param ingress_port [number] - port to allow ingress on. Must be a valid ELB ingress port.
More info here: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html + @param ingress_port [number] - port to allow ingress on. Must be a valid ELB ingress port. + More info here: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html @param access_cidr [string] - CIDR notation for external access to this tier. @param user_data [string] - User data to initialize the bastion hosts. """ @@ -21,7 +34,8 @@ def __init__(self, name='bastion', ingress_port='2222', access_cidr='0.0.0.0/0', self.name = name self.ingress_port = ingress_port self.access_cidr = access_cidr - self.instance_type = instance_type + self.default_instance_type = default_instance_type + self.suggested_instance_types = suggested_instance_types self.user_data = user_data super(Bastion, self).__init__(template_name=name) @@ -63,7 +77,8 @@ def build_hook(self): security_groups=[security_groups['bastion'], self.common_security_group], load_balancer=bastion_elb, user_data=self.user_data, - instance_type=self.instance_type + default_instance_type=self.default_instance_type, + suggested_instance_types=self.suggested_instance_types ) self.add_output(Output( diff --git a/src/environmentbase/patterns/ha_cluster.py b/src/environmentbase/patterns/ha_cluster.py index 79b85ae..d030b7f 100644 --- a/src/environmentbase/patterns/ha_cluster.py +++ b/src/environmentbase/patterns/ha_cluster.py @@ -1,7 +1,5 @@ from environmentbase.template import Template -from environmentbase import resources -from troposphere import Ref, Parameter, Base64, Join, Output, GetAtt, ec2, route53, autoscaling -import troposphere.constants as tpc +from troposphere import Ref, Parameter, Output, GetAtt, ec2, route53, autoscaling from troposphere.policies import CreationPolicy, ResourceSignal from troposphere.policies import UpdatePolicy, AutoScalingRollingUpdate @@ -22,7 +20,8 @@ def __init__(self, user_data='', env_vars={}, min_size=1, max_size=1, - instance_type='t2.micro', + default_instance_type='t2.micro', + suggested_instance_types=None, subnet_layer=None, elb_scheme=SCHEME_INTERNET_FACING, elb_listeners=[ @@ -43,7 +42,7 @@ def __init__(self, scaling_policies=None, creation_policy_timeout=None, allow_default_ingress=True): - + # This will be the name used in resource names and descriptions self.name = name @@ -61,8 +60,11 @@ def __init__(self, self.max_size = max_size # The type of instance for the autoscaling group - self.instance_type = instance_type - + self.default_instance_type = default_instance_type + + # The drop-down options offered when selecting an instance type from CloudFormation + self.suggested_instance_types = suggested_instance_types + # This is the subnet layer that the ASG is in (public, private, ...) self.subnet_layer = subnet_layer @@ -112,13 +114,12 @@ def __init__(self, # A list of dictionaries describing scaling policies to be passed to add_asg self.scaling_policies = scaling_policies - # Indicates whether ingress rules should be added to the ELB for type-appropriate CIDR ranges + # Indicates whether ingress rules should be added to the ELB for type-appropriate CIDR ranges # Internet facing ELBs would allow ingress from PUBLIC_ACCESS_CIDR and private ELBs will allow ingress from the VPC CIDR self.allow_default_ingress = allow_default_ingress super(HaCluster, self).__init__(template_name=self.name) - def build_hook(self): """ Hook to add tier-specific assets within the build stage of initializing this class.
@@ -148,12 +149,11 @@ def build_hook(self): # Add the outputs for the stack self.add_outputs() - def set_subnet_layer(self): """ If the subnet layer is not passed in, use a private subnet if there are any, otherwise use a public subnet. - This needs to happen in the build hook, since subnets is not yet initialized in the constructor. You - probably won't need to override this. This logic is also duplicated in template.add_asg(), but we need to + This needs to happen in the build hook, since subnets is not yet initialized in the constructor. You + probably won't need to override this. This logic is also duplicated in template.add_asg(), but we need to set it out here so we can pass the same subnet to template.add_elb() """ if not self.subnet_layer: @@ -170,12 +170,12 @@ def add_security_groups(self): """ elb_sg_ingress_rules = [] - + if self.allow_default_ingress: # Determine ingress rules for ELB security -- open to internet for internet-facing ELB, open to VPC for internal ELB access_cidr = PUBLIC_ACCESS_CIDR if self.elb_scheme == SCHEME_INTERNET_FACING else self.vpc_cidr - # Add the ingress rules to the ELB security group + # Add the ingress rules to the ELB security group for elb_port in [listener.get('elb_port') for listener in self.elb_listeners]: elb_sg_ingress_rules.append(ec2.SecurityGroupRule(FromPort=elb_port, ToPort=elb_port, IpProtocol='tcp', CidrIp=access_cidr)) @@ -189,7 +189,7 @@ def add_security_groups(self): SecurityGroupIngress=elb_sg_ingress_rules) ) - # Create the ASG security group + # Create the ASG security group ha_cluster_sg_name = '%sSecurityGroup' % self.name ha_cluster_sg = self.add_resource( ec2.SecurityGroup( @@ -200,7 +200,10 @@ def add_security_groups(self): # Create the reciprocal rules between the ELB and the ASG for all instance ports # NOTE: The condition in the list comprehension exists because elb_port is used as a default when instance_port is not specified - cluster_sg_ingress_ports = {listener.get('instance_port') if listener.get('instance_port') else listener.get('elb_port') for listener in self.elb_listeners} + cluster_sg_ingress_ports = { + listener.get('instance_port') if listener.get('instance_port') else listener.get('elb_port') + for listener in self.elb_listeners + } # Also add the health check port to the security group rules if self.elb_health_check_port: @@ -216,7 +219,6 @@ def add_security_groups(self): return self.security_groups - def add_cluster_instance_profile(self): """ Wrapper method to encapsulate process of adding the IAM role for the autoscaling group @@ -224,7 +226,6 @@ def add_cluster_instance_profile(self): """ self.instance_profile = None - def add_cluster_elb(self): """ Wrapper method to encapsulate process of creating the ELB for the autoscaling group @@ -247,7 +248,6 @@ def add_cluster_elb(self): health_check_path=self.elb_health_check_path ) - def add_cname(self): """ Wrapper method to encapsulate process of creating a CNAME DNS record for the ELB @@ -272,15 +272,13 @@ def add_cname(self): TTL='300', ResourceRecords=[GetAtt(self.cluster_elb, 'DNSName')])) - def add_user_data(self): """ Wrapper method to encapsulate process of constructing userdata for the autoscaling group - Sets self.user_data_payload constructed from the passed in user_data and env_vars + Sets self.user_data_payload constructed from the passed in user_data and env_vars """ self.user_data_payload = self.construct_user_data(self.env_vars, self.user_data) - def add_cluster_asg(self): """ Wrapper method to encapsulate process of creating the autoscaling 
group @@ -292,7 +290,8 @@ def add_cluster_asg(self): load_balancer=self.cluster_elb, ami_name=self.ami_name, user_data=self.user_data_payload, - instance_type=self.instance_type, + default_instance_type=self.default_instance_type, + suggested_instance_types=self.suggested_instance_types, min_size=self.min_size, max_size=self.max_size, subnet_layer=self.subnet_layer, diff --git a/src/environmentbase/patterns/ha_nat.py b/src/environmentbase/patterns/ha_nat.py index 1dab65f..e64d6b8 100644 --- a/src/environmentbase/patterns/ha_nat.py +++ b/src/environmentbase/patterns/ha_nat.py @@ -14,14 +14,15 @@ class HaNat(Template): a route directing egress traffic from the private subnet through this NAT ''' - def __init__(self, subnet_index, instance_type='t2.micro', enable_ntp=False, name='HaNat', extra_user_data=None): + def __init__(self, subnet_index, default_instance_type='t2.micro', enable_ntp=False, suggested_instance_types=None, name='HaNat', extra_user_data=None): ''' Method initializes HA NAT in a given environment deployment @param subnet_index [int] ID of the subnet that the NAT instance will be deployed to - @param instance_type [string] - Type of NAT instance in the autoscaling group + @param default_instance_type [string] - Type of NAT instance in the autoscaling group ''' self.subnet_index = subnet_index - self.instance_type = instance_type + self.default_instance_type = default_instance_type + self.suggested_instance_types = suggested_instance_types self.enable_ntp = enable_ntp self.extra_user_data = extra_user_data @@ -132,10 +133,10 @@ def add_nat_instance_profile(self): def add_nat_asg(self): - user_data = [resources.get_resource('nat_takeover.sh')] + user_data = [resources.R.load_resource('nat_takeover.sh')] if self.enable_ntp: - user_data.append(resources.get_resource('ntp_takeover.sh')) + user_data.append(resources.R.load_resource('ntp_takeover.sh')) if self.extra_user_data: user_data.append(open(self.extra_user_data).read()) @@ -149,8 +150,8 @@ def add_nat_asg(self): " --region ", {"Ref": "AWS::Region"} ]) - image_id_expr = self.get_ami_expr(self.instance_type, 'natAmiId', 'HaNat') - instancetype_param = self.get_instancetype_param(self.instance_type, 'HaNat') + image_id_expr = self.get_ami_expr(self.default_instance_type, 'natAmiId', 'HaNat', self.suggested_instance_types) + instancetype_param = self.get_instancetype_param(self.default_instance_type, 'HaNat') nat_launch_config = self.add_resource(LaunchConfiguration( "Nat%sLaunchConfig" % str(self.subnet_index), diff --git a/src/environmentbase/resources.py b/src/environmentbase/resources.py index cc4ec76..35758bc 100644 --- a/src/environmentbase/resources.py +++ b/src/environmentbase/resources.py @@ -1,126 +1,241 @@ from pkg_resources import resource_string, resource_exists -import yaml, json +import copy +import yaml +import json import os +import re -def _test_filelike(parent, basename, validator): - """ - Tests various file extension to find the requested file like resource - :param parent: parent directory - :param basename: portion of filename excluding the file extension - :return: file path of the resource that exists, or None - """ - (basename, provided_suffix) = os.path.splitext(basename) - suffix_list = EXTENSIONS - if provided_suffix == '' or provided_suffix not in suffix_list: - suffix_list = [provided_suffix] + EXTENSIONS +# Declare R to be the singleton Resource instance +R = None - for extension in suffix_list: - file_path = os.path.join(parent, basename + extension) - if validator(file_path): - return file_path 
- return None +class Res(object): + CONFIG_REQUIREMENTS_FILENAME = 'config_schema.json' + CONFIG_FILENAME = "config.json" + IMAGE_MAP_FILENAME = "ami_cache.json" + INSTANCETYPE_MAP_FILENAME = "instancetype_to_arch.json" + REGION_MAP_FILENAME = "region_map.json" -def test_resource(parent, basename, relative_to_module_name=__name__): - resource_test = lambda file_path: resource_exists(relative_to_module_name, file_path) - return _test_filelike(parent, basename, resource_test) + DEFAULT_DATA_PATH = "data" + # Configure resource loading for yaml parser (i.e. yaml.load()) + _INCLUDE_RESOURCE_MODULE = __name__ + _INCLUDE_RESOURCE_INTERNAL_PATH = DEFAULT_DATA_PATH -def test_file(parent, basename): - file_test = lambda file_path: os.path.isfile(file_path) - return _test_filelike(parent, basename, file_test) - - -def get_yaml_resource(resource_name, relative_to_module_name=__name__): - """ - Get package resource as json - """ - return yaml.load(get_resource(resource_name, relative_to_module_name)) - - -def get_resource(resource_name, relative_to_module_name=__name__): - """ - Retrieves resource embedded in the package (even if installed as a zipped archive). - """ - file_path = test_resource('data', resource_name, relative_to_module_name) - file_content = resource_string(relative_to_module_name, file_path) - return file_content - -EXTENSIONS = ['.json', '.yaml', '.yml'] - -DEFAULT_CONFIG_FILENAME = 'config' -FACTORY_DEFAULT_CONFIG = get_yaml_resource(DEFAULT_CONFIG_FILENAME) - - -DEFAULT_AMI_CACHE_FILENAME = 'ami_cache' -FACTORY_DEFAULT_AMI_CACHE = get_yaml_resource(DEFAULT_AMI_CACHE_FILENAME) - - -CONFIG_REQUIREMENTS_FILENAME = 'config_schema' -CONFIG_REQUIREMENTS = get_yaml_resource(CONFIG_REQUIREMENTS_FILENAME) - - -COMMON_STRINGS_FILENAME = 'common_strings' -COMMON_STRINGS = get_yaml_resource(COMMON_STRINGS_FILENAME) - - -def load_file(parent, basename): - file_path = test_file(parent, basename) - if not file_path: - raise Exception("%s does not exist. 
Try running the init command to generate it.\n" % (basename + EXTENSIONS[0])) - - return load_yaml_file(file_path) - -def load_yaml_file(file_path): - - if not os.path.isfile(file_path): - raise Exception('{} does not exist'.format(file_path)) - - with open(file_path, 'r') as f: - try: - content = f.read() - parsed_content = yaml.load(content) - except ValueError: - print '%s could not be parsed' % file_path - raise - - return parsed_content - -def load_json_file(file_path): - - if not os.path.isfile(file_path): - raise Exception('{} does not exist'.format(file_path)) - - with open(file_path, 'r') as f: - try: - content = f.read() - parsed_content = json.loads(content) - except ValueError: - print '%s could not be parsed' % file_path - raise + # Generated config sections to break out to separate files + _EXTRACTED_CONFIG_SECTIONS = { + 'image_map': IMAGE_MAP_FILENAME, + 'instancetype_to_arch': INSTANCETYPE_MAP_FILENAME, + 'valid_regions': REGION_MAP_FILENAME + } - return parsed_content + # Resource cache, prevents multiple loads of the same file + _loaded_files = {} + + def __init__(self): + # Set PyYAML's '!include' constructor to use the file loader + # Any function that changes this should set it back before exiting + yaml.add_constructor("!include", Res._yaml_file_include) + + # Implementation of "!include" directive for yaml parser to load YAML content from egg archive resource + @staticmethod + def _yaml_resource_include(loader, node): + content = R.load_resource( + node.value, + module=Res._INCLUDE_RESOURCE_MODULE, + internal_path=Res._INCLUDE_RESOURCE_INTERNAL_PATH) + return yaml.load(content) + + # Implementation of "!include" directive for yaml parser to load YAML content from filesystem resource + @staticmethod + def _yaml_file_include(loader, node): + # Get the path out of the yaml file + file_name = os.path.join(os.path.dirname(loader.name), node.value) + if os.path.isfile(file_name): + with file(file_name) as inputfile: + return yaml.load(inputfile) + else: + raise Exception("Could not load file '%s'" % node.value) + + def load_resource(self, + filename, + module=None, + internal_path=None): + """ + @param filename [string] The name of the file within the egg archive + @param module [string] A module name within the egg archive, 'internal_path' + must be sibling to this location within the archive. Typically magic var '__name__'. + @param internal_path [string] File path prepended to filename e.g. <internal_path>/<filename> + Return content of a resource embedded within an egg archive. + """ + + # Can't set with param values for some reason + if not module: + module = Res._INCLUDE_RESOURCE_MODULE + if not internal_path: + internal_path = Res._INCLUDE_RESOURCE_INTERNAL_PATH + + # Attempt to retrieve cached content + key = "%s:%s:%s" % (module, internal_path, filename) + if key in self._loaded_files: + return self._loaded_files[key] + + filepath = os.path.join(internal_path, filename) + + if not resource_exists(module, filepath): + raise Exception("Resource '%s' not found in module '%s'" % (filename, module)) + + file_content = resource_string(module, filepath) + + # cache file_content + self._loaded_files[key] = file_content + + return file_content + + def parse_file(self, filename, from_file=True): + """ + Read file into Python data structure from either EGG archive or local filesystem. + Note: File may contain !include references to other files relative to the requested file. + @param filename [string] Name of file to load.
+ @param from_file [boolean] If true loads files from the filesystem, otherwise the file is loaded from the resource + path using _INCLUDE_RESOURCE_MODULE and _INCLUDE_RESOURCE_INTERNAL_PATH. + """ + # Load file content from file or resource path + if from_file: + with file(filename) as f: + content = f.read() + else: + content = self.load_resource( + filename, + module=Res._INCLUDE_RESOURCE_MODULE, + internal_path=Res._INCLUDE_RESOURCE_INTERNAL_PATH) + + # Configure PyYAML to process '!include' directive with correct handler function + if not from_file: + yaml.add_constructor("!include", Res._yaml_resource_include) + + # parse and return + parsed_content = yaml.load(content) + + # Set PyYAML's !include back to loading from files + if not from_file: + yaml.add_constructor("!include", Res._yaml_file_include) + + return parsed_content + + def load_config(self, config_filename=CONFIG_FILENAME): + config = self.parse_file(config_filename) + return config + + def _extract_config_section(self, config, config_key, filename, prompt=False): + """ + Write requested config section to file and replace config value with a sentinel value to + be processed later into a valid '!include' directive. The sentinel is a string containing + the correct include directive. + @param config [list|dict] The config data structure to be modified with a template token. + @param config_key [string] The config key to be externalized. + @param filename [string] The name of the file created to hold config[config_key] + @param prompt [boolean] block for user input to abort file output if file already exists + """ + + # If file exists ask user if we should proceed + if prompt and os.path.isfile(filename): + overwrite = raw_input("%s already exists. Overwrite? (y/n) " % filename).lower() + print + if not overwrite == 'y': + return + + section = config.get(config_key) + + # Output file + with open(filename, 'w') as f: + content = json.dumps(section, indent=4, separators=(',', ': '), sort_keys=True) + f.write(content) + + config[config_key] = "!include %s" % filename + + def generate_config(self, + config_file=CONFIG_FILENAME, + output_filename=None, + config_handlers=list(), + extract_map=_EXTRACTED_CONFIG_SECTIONS, + prompt=False, + is_silent=False): + """ + Copies specified yaml/json file from the EGG resource to current directory, default is 'config.json'. Optionally + split out specific sections into separate files using extract_map. Additionally use config_handlers to add in + additional config content before serializing content to file. + @param config_file [string] Name of file within resource path to load. + @param output_filename [string] Name of generated config file (default is same as 'config_file') + @param prompt [boolean] block for user input to abort file output if file already exists + @param is_silent [boolean] suppress console output (primarily for testing) + @param extract_map [map] Specifies top-level sections of config to externalize to separate file. + Where key=config section name, value=filename.
+ @param config_handlers [list(objects)] Config handlers should resemble the following: + class CustomHandler(object): + @staticmethod + def get_factory_defaults(): + return custom_config_addition + @staticmethod + def get_config_schema(): + return custom_config_validation + """ + # Output same file name as the input unless specified otherwise + if not output_filename: + output_filename = config_file + + # Load config from egg + config = self.parse_file(config_file, from_file=False) + + # Merge in any defaults provided by registered config handlers + for handler in config_handlers: + config.update(handler.get_factory_defaults()) + + # Make changes to a new copy of the config + config_copy = copy.deepcopy(config) + + # Since the !include references are not standard json we need to use special values we can + # find and replace after serializing to string. + + # Write config sections to file and replace content with "!include" string. + for section_key, filename in extract_map.iteritems(): + self._extract_config_section(config_copy, section_key, filename, prompt) + if not is_silent: + print "Generated %s file at %s\n" % (section_key, filename) + + # Serialize config to string + templatized_config_string = json.dumps(config_copy, indent=4, separators=(',', ': '), sort_keys=True) + + # Replace encoded 'include' with the real one using regex. + # This amounts to capturing the quoted string and stripping off the quotes + final_config_string = re.sub(r"\"!include ([a-zA-Z0-9_.\\-]*)\"", + lambda m: m.group(0)[1:-1], + templatized_config_string) + + # If file exists ask user if we should proceed + if prompt and os.path.isfile(output_filename): + overwrite = raw_input("%s already exists. Overwrite? (y/n) " % output_filename).lower() + print + if not overwrite == 'y': + return + + # Finally write the config to file + with open(output_filename, 'w') as f: + f.write(final_config_string) + if not is_silent: + print "Generated config file at %s\n" % output_filename + + return final_config_string + + +# Assign singleton Resource instance now that the class is defined +R = Res() + +COMMON_STRINGS_FILENAME = 'common_strings.json' +COMMON_STRINGS = R.parse_file(COMMON_STRINGS_FILENAME, from_file=False) def get_str(key, default=None): return COMMON_STRINGS.get(key, default) - - -def get_type(typename): - """ - Convert typename to type object - :param typename: String name of type - :return: __builtin__ type instance - """ - types = { - 'bool': bool, - 'int': int, - 'float': float, - # avoid all the python unicode weirdness by making all the strings basestrings - 'str': basestring, - 'basestring': basestring, - 'list': list - } - return types.get(typename, None) diff --git a/src/environmentbase/scripts/region_arch_2_ami.py b/src/environmentbase/scripts/region_arch_2_ami.py index 4b5eae7..09e3e0d 100644 --- a/src/environmentbase/scripts/region_arch_2_ami.py +++ b/src/environmentbase/scripts/region_arch_2_ami.py @@ -104,8 +104,8 @@ def filter_fun(ami): if __name__ == '__main__': - ami_map = {'PV64': {}, 'HVM64': {}} regions = get_region_list() + ami_map = {region_name: {} for region_name in regions} print 'Regions %s' % regions for region in regions: @@ -120,14 +120,14 @@ def filter_fun(ami): hvm_ami = get_hvm_ami(filtered_amis) if hvm_ami: - ami_map['HVM64'][region] = hvm_ami["ImageId"] + ami_map[region]['HVM64'] = hvm_ami["ImageId"] print json.dumps(hvm_ami, indent=4, separators=(',', ': ')) else: print '* No HVM hits for region', region pv_ami = get_pv_ami(filtered_amis) if pv_ami: - ami_map['PV64'][region] =
pv_ami["ImageId"] + ami_map[region]['PV64'] = pv_ami["ImageId"] print json.dumps(pv_ami, indent=4, separators=(',', ': ')) else: print '* No PV hits for region', region diff --git a/src/environmentbase/template.py b/src/environmentbase/template.py index 977cd1b..6812543 100644 --- a/src/environmentbase/template.py +++ b/src/environmentbase/template.py @@ -627,7 +627,8 @@ def get_ami_expr(self, default_instance_type, image_name, layer_name, allowed_in def add_asg(self, layer_name, instance_profile=None, - instance_type='t2.micro', + default_instance_type='t2.micro', + suggested_instance_types=None, ami_name='amazonLinuxAmiId', ec2_key=None, user_data=None, @@ -655,7 +656,8 @@ def add_asg(self, Wrapper method used to create an EC2 Launch Configuration and Auto Scaling group @param layer_name [string] friendly name of the set of instances being created - will be set as the name for instances deployed @param instance_profile [Troposphere.iam.InstanceProfile] IAM Instance Profile object to be applied to instances launched within this Auto Scaling group - @param instance_type [string] Reference to the AWS EC2 Instance Type to deploy. + @param default_instance_type [string] Reference to the AWS EC2 Instance Type to deploy. + @param suggested_instance_types [list] Instance types populating drop-down box when running template on Cloudformation. @param ami_name [string] Name of the AMI to deploy as defined within the RegionMap lookup for the deployed region @param ec2_key [Troposphere.Parameter | Troposphere.Ref(Troposphere.Parameter)] Input parameter used to gather the name of the EC2 key to use to secure access to instances launched within this Auto Scaling group @param user_data [string[]] Array of strings (lines of bash script) to be set as the user data as a bootstrap script for instances launched within this Auto Scaling group @@ -679,8 +681,8 @@ def add_asg(self, elif ec2_key is None: ec2_key = Ref(self.parameters['ec2Key']) - if type(instance_type) != str: - instance_type = Ref(instance_type) + if type(default_instance_type) != str: + default_instance_type = Ref(default_instance_type) raise TemplateValueError("Tempalte.add_asg::instance_type should be String") sg_list = [] @@ -707,8 +709,8 @@ def add_asg(self, if not associate_public_ip: associate_public_ip = True if subnet_type == 'public' else False - image_id_expr = self.get_ami_expr(instance_type, ami_name, layer_name) - instancetype_param = self.get_instancetype_param(instance_type, layer_name) + image_id_expr = self.get_ami_expr(default_instance_type, ami_name, layer_name, allowed_instance_types=suggested_instance_types) + instancetype_param = self.get_instancetype_param(default_instance_type, layer_name) launch_config_obj = autoscaling.LaunchConfiguration( layer_name + 'LaunchConfiguration', diff --git a/src/environmentbase/utility.py b/src/environmentbase/utility.py index 25e298d..d0a24e3 100644 --- a/src/environmentbase/utility.py +++ b/src/environmentbase/utility.py @@ -17,6 +17,24 @@ def first_letter_capitalize(the_string): return the_string[:1].capitalize() + the_string[1:] +def get_type(typename): + """ + Convert typename to type object + :param typename: String name of type + :return: __builtin__ type instance + """ + types = { + 'bool': bool, + 'int': int, + 'float': float, + # avoid all the python unicode weirdness by making all the strings basestrings + 'str': basestring, + 'basestring': basestring, + 'list': list + } + return types.get(typename, None) + + def _get_boto_session(boto_config): if not 
boto_config.get('session'): boto_config['session'] = boto3.session.Session(region_name=boto_config['region_name']) @@ -63,7 +81,7 @@ def get_template_from_s3(config, template_resource_path): get_boto_client(config, "s3").download_file(s3_bucket, template_resource_path, file_path) # Parse the template as json and return the dictionary - return res.load_json_file(file_path) + return res.R.parse_file(file_path) def get_stack_params_from_parent_template(parent_template_contents, stack_name): diff --git a/src/examples/basic.py b/src/examples/basic.py index 6627c91..6c29af6 100755 --- a/src/examples/basic.py +++ b/src/examples/basic.py @@ -1,6 +1,8 @@ +from environmentbase.environmentbase import EnvConfig from environmentbase.networkbase import NetworkBase from environmentbase.patterns.bastion import Bastion from environmentbase.patterns.ha_cluster import HaCluster +from environmentbase.patterns.base_network import BaseNetwork class MyEnvClass(NetworkBase): @@ -20,7 +22,9 @@ def create_hook(self): self.add_child_template(HaCluster( name="MyCluster", min_size=2, max_size=3, - instance_type='t2.micro')) + default_instance_type='t2.micro', + suggested_instance_types=['t2.micro'])) if __name__ == '__main__': - MyEnvClass() + env_config = EnvConfig(config_handlers=[BaseNetwork]) + MyEnvClass(env_config=env_config) diff --git a/src/tests/test_environmentbase.py b/src/tests/test_environmentbase.py index c95c19c..a529f96 100644 --- a/src/tests/test_environmentbase.py +++ b/src/tests/test_environmentbase.py @@ -42,7 +42,7 @@ def _create_dummy_config(self, env_base=None): dummy_int = 3 dummy_list = ['A', 'B', 'C'] - config_requirements = copy.deepcopy(res.CONFIG_REQUIREMENTS) + config_requirements = res.R.parse_file(res.Res.CONFIG_REQUIREMENTS_FILENAME, from_file=False) if env_base: for handler in env_base.config_handlers: @@ -50,18 +50,21 @@ def _create_dummy_config(self, env_base=None): config = {} for (section, keys) in config_requirements.iteritems(): - config[section] = {} - for (key, key_type) in keys.iteritems(): - if key_type == basestring.__name__ or key_type == str.__name__: - config[section][key] = dummy_string - elif key_type == bool.__name__: - config[section][key] = dummy_bool - elif key_type == int.__name__: - config[section][key] = dummy_int - elif key_type == list.__name__: - config[section][key] = dummy_list - - config['boto']['region_name'] = config['global']['valid_regions'][0] + if "list" in keys: + config[section] = ['us-west-2'] + else: + config[section] = {} + for (key, key_type) in keys.iteritems(): + if key_type == basestring.__name__ or key_type == str.__name__: + config[section][key] = dummy_string + elif key_type == bool.__name__: + config[section][key] = dummy_bool + elif key_type == int.__name__: + config[section][key] = dummy_int + elif key_type == list.__name__: + config[section][key] = dummy_list + + config['boto']['region_name'] = config['valid_regions'][0] return config def _create_local_file(self, name, content): @@ -115,26 +118,23 @@ def process_request(self, controller): self.assertEqual(actions_called['delete'], 1) def test_config_yaml(self): - """ Make sure load_config can load yaml files.""" - with open("config.yaml", 'w') as f: - f.write(yaml.dump(res.FACTORY_DEFAULT_CONFIG, default_flow_style=False)) + """ Verify load_config can load non-default files """ + alt_config_filename = 'config.yaml' + config = res.R.parse_file(res.Res.CONFIG_FILENAME, from_file=False) + + with open(alt_config_filename, 'w') as f: + f.write(yaml.dump(config, default_flow_style=False)) 
f.flush() fake_cli = self.fake_cli(['create', '--config-file', 'config.yaml']) - base = eb.EnvironmentBase(fake_cli) + base = eb.EnvironmentBase(fake_cli, config_filename=alt_config_filename) base.load_config() self.assertEqual(base.config['global']['environment_name'], 'environmentbase') - - def test_config_override(self): """ Make sure local config files overrides default values.""" - # We don't care about the AMI cache for this test, - # but the file has to exist and to contain valid json - self._create_local_file(res.DEFAULT_AMI_CACHE_FILENAME + res.EXTENSIONS[0], '{}') - # Create a local config file and verify that it overrides the factory default config = self._create_dummy_config() @@ -142,7 +142,7 @@ def test_config_override(self): original_value = config['global']['environment_name'] config['global']['environment_name'] = original_value + 'dummy' - with open(res.DEFAULT_CONFIG_FILENAME + res.EXTENSIONS[0], 'w') as f: + with open(res.Res.CONFIG_FILENAME, 'w') as f: f.write(yaml.dump(config)) f.flush() @@ -161,8 +161,8 @@ def test_config_override(self): base.load_config() # remove config.json and create the alternate config file - os.remove(res.DEFAULT_CONFIG_FILENAME + res.EXTENSIONS[0]) - self.assertFalse(os.path.isfile(res.DEFAULT_CONFIG_FILENAME + res.EXTENSIONS[0])) + os.remove(res.Res.CONFIG_FILENAME) + self.assertFalse(os.path.isfile(res.Res.CONFIG_FILENAME)) with open(config_filename, 'w') as f: f.write(yaml.dump(config)) @@ -198,8 +198,11 @@ def test_config_validation(self): cntrl._validate_config(invalid_config) # Check missing key validation - (key, value) = keys.items()[0] - del valid_config[section][key] + if isinstance(keys, list): + value = keys.pop() + else: + (key, value) = keys.items()[0] + del valid_config[section][key] with self.assertRaises(eb.ValidationError): cntrl._validate_config(valid_config) @@ -211,8 +214,9 @@ def test_config_validation(self): cntrl._validate_config(valid_config) # Check wildcard sections + config_reqs = res.R.parse_file(res.Res.CONFIG_REQUIREMENTS_FILENAME, from_file=False) extra_reqs = {'*-db': {'host': 'str', 'port': 'int'}} - extra_reqs.update(res.CONFIG_REQUIREMENTS) + extra_reqs.update(config_reqs) valid_config.update({ 'my-db': {'host': 'localhost', 'port': 3306}, @@ -226,7 +230,7 @@ def test_config_validation(self): 'deeper': { 'key': 'str' }}}} - extra_reqs.update(res.CONFIG_REQUIREMENTS) + extra_reqs.update(config_reqs) valid_config.update({ 'lets': { @@ -245,62 +249,64 @@ def get_factory_defaults(): @staticmethod def get_config_schema(): - return {'new_section': {'new_key': 'str'}} + return {'new_section': {'new_key': 'basestring'}} class MyEnvBase(eb.EnvironmentBase): pass view = self.fake_cli(['init']) - env_config=eb.EnvConfig(config_handlers=[MyConfigHandler]) + env_config = eb.EnvConfig(config_handlers=[MyConfigHandler]) controller = MyEnvBase( view=view, env_config=env_config ) - controller.init_action() + controller.init_action(is_silent=True) controller.load_config() # Make sure the runtime config and the file saved to disk have the new parameter self.assertEquals(controller.config['new_section']['new_key'], 'value') - with open(res.DEFAULT_CONFIG_FILENAME + res.EXTENSIONS[0], 'r') as f: + with open(res.Res.CONFIG_FILENAME, 'r') as f: external_config = yaml.load(f) self.assertEquals(external_config['new_section']['new_key'], 'value') # Check extended validation # recreate config file without 'new_section' and make sure it fails validation - os.remove(res.DEFAULT_CONFIG_FILENAME + res.EXTENSIONS[0]) + 
os.remove(res.Res.CONFIG_FILENAME) dummy_config = self._create_dummy_config() - self._create_local_file(res.DEFAULT_CONFIG_FILENAME + res.EXTENSIONS[0], json.dumps(dummy_config, indent=4)) + self._create_local_file(res.Res.CONFIG_FILENAME, json.dumps(dummy_config, indent=4)) with self.assertRaises(eb.ValidationError): base = MyEnvBase(view=view, env_config=env_config) base.load_config() - def test_generate_config(self): """ Verify cli flags update config object """ # Verify that debug and output are set to the factory default base = eb.EnvironmentBase(self.fake_cli(['init'])) - base.init_action() + res.R.generate_config(prompt=True, is_silent=True) + base.load_config() + + factory_config = res.R.parse_file(res.Res.CONFIG_FILENAME, from_file=False) self.assertEqual(base.config['global']['print_debug'], - res.FACTORY_DEFAULT_CONFIG['global']['print_debug']) + factory_config['global']['print_debug']) self.assertEqual(base.config['global']['environment_name'], - res.FACTORY_DEFAULT_CONFIG['global']['environment_name']) + factory_config['global']['environment_name']) def test_template_file_flag(self): # verify that the --template-file flag changes the config value dummy_value = 'dummy' base = eb.EnvironmentBase(self.fake_cli(['create', '--template-file', dummy_value])) - base.init_action() + base.init_action(is_silent=True) base.load_config() self.assertEqual(base.config['global']['environment_name'], dummy_value) def test_config_file_flag(self): dummy_value = 'dummy' base = eb.EnvironmentBase(self.fake_cli(['create', '--config-file', dummy_value])) - base.init_action() + base.init_action(is_silent=True) self.assertTrue(os.path.isfile(dummy_value)) def test_factory_default(self): @@ -308,17 +314,15 @@ base = eb.EnvironmentBase(self.fake_cli(['init'])) base.load_config() - - # Create refs to files that should be created and make sure they don't already exists - config_file = os.path.join(self.temp_dir, res.DEFAULT_CONFIG_FILENAME + res.EXTENSIONS[0]) - ami_cache_file = os.path.join(self.temp_dir, res.DEFAULT_AMI_CACHE_FILENAME + res.EXTENSIONS[0]) + config_file = os.path.join(self.temp_dir, res.Res.CONFIG_FILENAME) + ami_cache_file = os.path.join(self.temp_dir, res.Res.IMAGE_MAP_FILENAME) self.assertFalse(os.path.isfile(config_file)) self.assertFalse(os.path.isfile(ami_cache_file)) # Verify that create_missing_files works as intended base = eb.EnvironmentBase(self.fake_cli(['init'])) - base.init_action() + base.init_action(is_silent=True) self.assertTrue(os.path.isfile(config_file)) # TODO: After ami_cache is updated change 'create_missing_files' to be singular # self.assertTrue(os.path.isfile(ami_cache_file)) @@ -326,7 +330,6 @@ # Verify that the previously created files are loaded up correctly eb.EnvironmentBase(self.fake_cli(['create'])) - # The following two tests use a create_action, which currently doesn't test correctly # def test_controller_subclass(self): # class MyController(eb.EnvironmentBase): # def create_action(self): # # at this point the controller should be ready to create the template # # ... but damage it in an obvious way # self.config = None # # we should never get to this point # raise Exception('create_action() is broken') # res = ec2.Instance("ec2instance", InstanceType="m3.medium", ImageId="ami-951945d0") # self.template.add_resource(res) - # # Initialize the controller with faked 'create' CLI parameter # with patch.object(sys, 'argv', ['environmentbase', 'init']): # ctrlr = MyController(cli.CLI(quiet=True)) @@ -398,7 +400,6 @@ # self.assertEqual(len(policy['PolicyDocument']['Statement']), 2) # self.assertEqual(policy['PolicyDocument']['Statement'][1]['Action'], ['DummyAction']) - # CloudFormation doesn't
currently support a dry run, so this test would create a live stack # def test_deploy(self): # with patch.object(sys, 'argv', [ diff --git a/src/tests/test_resources.py b/src/tests/test_resources.py new file mode 100644 index 0000000..e75519c --- /dev/null +++ b/src/tests/test_resources.py @@ -0,0 +1,143 @@ +from unittest2 import TestCase +from environmentbase.resources import Res +from tempfile import mkdtemp +import shutil +import os +import json +import yaml + + +class ResourcesTestCase(TestCase): + + def setUp(self): + Res._INCLUDE_RESOURCE_MODULE = 'environmentbase.resources' + Res._INCLUDE_RESOURCE_INTERNAL_PATH = 'data' + + self.r = Res() + + # Change to a temp dir so auto-generated files don't clutter the OS + self.temp_dir = mkdtemp() + os.chdir(self.temp_dir) + + def tearDown(self): + # Delete any files left in the temp dir + shutil.rmtree(self.temp_dir) + assert not os.path.isdir(self.temp_dir) + + def test_load_resource(self): + # validate load from usual location + content = self.r.load_resource('config.json') + self.assertIn('global', content) + + # validate load from custom location + content = self.r.load_resource('amzn_linux_ec2.json', module=__name__, internal_path='resources') + self.assertIn("Sample Template", content) + + # Validate failure scenarios + with self.assertRaises(Exception): + self.r.load_resource('fake_file') + + with self.assertRaises(Exception): + self.r.load_resource('config.json', resource_dir='wrong_path') + + with self.assertRaises(Exception): + self.r.load_resource('config.json', module="wrong_module") + + def test_parse_file(self): + + # Configure resource loading to read from the test module + Res._INCLUDE_RESOURCE_MODULE = __name__ + Res._INCLUDE_RESOURCE_INTERNAL_PATH = 'resources' + + # Verify resource loading: + parsed_content = self.r.parse_file('amzn_linux_ec2.json', from_file=False) + self.assertIn("Sample Template", parsed_content['Description']) + + # Save this file to the current directory (the temp dir for this test run) + # with a modified description for verification + parsed_content['Description'] = 'Blaaa' + with open('amzn_linux_ec2.json', 'w') as f: + f.write(json.dumps(parsed_content, indent=4, separators=(',', ': '))) + + # Verify file loading + parsed_content = self.r.parse_file('amzn_linux_ec2.json', from_file=True) + self.assertEquals("Blaaa", parsed_content['Description']) + + def test_extract_config_section(self): + # Create bogus config object + config = { + 'a': 'don\'t extract', + 'b': {'map': 'extract', 'test': range(1, 10)} + } + + # verify extraction of complex structure + self.r._extract_config_section(config, 'b', 'b.json') + self.assertEquals(config['b'], '!include b.json') + with file('b.json') as f: + content = f.read() + parsed_content = yaml.load(content) + self.assertEquals(parsed_content, {'map': 'extract', 'test': range(1, 10)}) + + # verify unextracted section was not changed + self.assertEquals(config['a'], 'don\'t extract') + + def test_generate_config(self): + # Configure resource loading to read from the test module + Res._INCLUDE_RESOURCE_MODULE = __name__ + Res._INCLUDE_RESOURCE_INTERNAL_PATH = 'resources' + + custom_config_addition = { + 'custom': { + 'a': 'don\'t extract', + 'b': {'map': 'extract', 'test': range(1, 10)} + }, + "AWSTemplateFormatVersion": "2016-02-05" + } + + custom_config_validation = { + 'custom': { + 'a': 'basestring', + 'b': {'map': 'basestring', 'test': 'list'} + } + } + + class CustomHandler(object): + @staticmethod + def get_factory_defaults(): + return custom_config_addition
+ @staticmethod + def get_config_schema(): + return custom_config_validation + + # Treat sample cfn template as if it were a config file for testing purposes + self.r.generate_config( + is_silent=True, + config_file='amzn_linux_ec2.json', + extract_map={ + "Description": "description.json", + "Parameters": "params.json", + "Mappings": "mappings.json", + "Resources": "resources.json", + "Outputs": "output.json" + }, + config_handlers=[CustomHandler()] + ) + + # Make sure all the extracted files exist + with file('amzn_linux_ec2.json') as f: + content = f.read() + for inc_file in ['description.json', 'params.json', 'mappings.json', 'resources.json', 'output.json']: + self.assertIn('!include %s' % inc_file, content) + + # Read the JSON into Python data structure + parsed_content = self.r.parse_file('amzn_linux_ec2.json', from_file=True) + + # Verify you got a data structure back + self.assertTrue(isinstance(parsed_content, dict)) + + # Verify new config section is added + self.assertEquals(parsed_content['custom'], custom_config_addition['custom']) + + # Verify modified config section was updated + self.assertEquals(parsed_content['AWSTemplateFormatVersion'], "2016-02-05")
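Note on the new config flow (not part of the patch): a minimal end-to-end sketch of the !include mechanism this diff introduces, assuming only the Res API shown above; the asserted values mirror the shipped region_map.json and ami_cache.json:

    # Python 2, matching the codebase; run from an empty working directory.
    from environmentbase import resources as res

    # Writes config.json plus the extracted sections listed in
    # Res._EXTRACTED_CONFIG_SECTIONS: ami_cache.json,
    # instancetype_to_arch.json and region_map.json.
    res.R.generate_config(is_silent=True)

    # config.json now contains raw directives such as:
    #     "image_map": !include ami_cache.json
    # load_config() -> parse_file() feeds the text to yaml.load(), and the
    # file-based '!include' constructor registered in Res.__init__ inlines
    # each referenced file while parsing.
    config = res.R.load_config('config.json')
    assert 'us-east-1' in config['valid_regions']
    assert 'HVM64' in config['image_map']['natAmiId']['us-east-1']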
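The bodies of get_ami_expr() and get_instancetype_param() are not shown in this diff, so the following is an assumption about intent rather than the actual implementation: suggested_instance_types presumably surfaces as the AllowedValues of the generated troposphere instance-type Parameter, roughly:

    from troposphere import Parameter

    # Hypothetical shape of the parameter emitted for a 'HaNat' layer; the
    # logical name and Description are illustrative, not taken from the code.
    instancetype_param = Parameter(
        'haNatInstanceType',
        Type='String',
        Default='t2.micro',  # default_instance_type
        AllowedValues=['m1.small', 't2.micro', 't2.small'],  # suggested_instance_types
        Description='Instance type for the HaNat layer'
    )

If that reading is right, operators get a constrained drop-down of vetted instance types in the CloudFormation console rather than a free-form string, which is the point of splitting instance_type into default_instance_type plus suggested_instance_types.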