From ad331edbf2b0c839fef111ced23827e8966aae45 Mon Sep 17 00:00:00 2001 From: marcinromaszewicz Date: Mon, 16 Jul 2018 10:52:08 -0700 Subject: [PATCH 1/4] Blueprints for EKS cluster and workers Simple blueprints to create an EKS cluster, and to create workers in an AutoScalngGroup which connect to that master. --- stacker_blueprints/eks.py | 427 +++++++++++++++++++++ tests/fixtures/blueprints/eks_cluster.json | 63 +++ tests/fixtures/blueprints/eks_workers.json | 376 ++++++++++++++++++ tests/test_eks.py | 54 +++ 4 files changed, 920 insertions(+) create mode 100644 stacker_blueprints/eks.py create mode 100644 tests/fixtures/blueprints/eks_cluster.json create mode 100644 tests/fixtures/blueprints/eks_workers.json create mode 100644 tests/test_eks.py diff --git a/stacker_blueprints/eks.py b/stacker_blueprints/eks.py new file mode 100644 index 00000000..aa250fc0 --- /dev/null +++ b/stacker_blueprints/eks.py @@ -0,0 +1,427 @@ +from awacs.aws import ( + Allow, + Statement, + Principal, + Policy +) +from awacs.sts import ( + AssumeRole +) + +from troposphere import ( + Base64, + FindInMap, + GetAtt, + Join, + Ref, + Output +) + +from troposphere.autoscaling import ( + AutoScalingGroup, + LaunchConfiguration, + Tag +) + +from troposphere.ec2 import ( + BlockDeviceMapping, + EBSBlockDevice +) + +from troposphere.iam import ( + InstanceProfile, + Role +) + +from troposphere.policies import ( + AutoScalingReplacingUpdate, + AutoScalingRollingUpdate, + UpdatePolicy +) + +from troposphere import eks +from stacker.blueprints.base import Blueprint + + +class Cluster(Blueprint): + VARIABLES = { + "Name": { + "type": str, + "description": "The name of the cluster to create.", + }, + "ExistingRoleArn": { + "type": str, + "description": "IAM Role ARN with EKS assume role policies. 
One " + "will be created if it's not provided.", + "default": "" + }, + "Version": { + "type": str, + "description": "Kubernetes version", + "default": "", + }, + "SecurityGroupIds": { + "type": str, + "description": "A comma separated list of security group ids for " + "controlling ENI access from EKS to workers", + }, + "SubnetIds": { + "type": str, + "description": "A comma separated list of subnet ids where you " + "will launch your worker nodes", + } + } + + # This creates an IAM role which EKS requires, as described here: + # https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#eks-create-cluster + def create_iam_role(self): + eks_service_role_id = "EksServiceRole" + t = self.template + t.add_resource( + Role( + eks_service_role_id, + AssumeRolePolicyDocument=Policy( + Statement=[ + Statement( + Effect=Allow, + Action=[AssumeRole], + Principal=Principal("Service", + ["eks.amazonaws.com"]) + ) + ] + ), + Path="/", + ManagedPolicyArns=[ + "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", + "arn:aws:iam::aws:policy/AmazonEKSServicePolicy", + ] + ) + ) + return GetAtt(eks_service_role_id, "Arn") + + def get_iam_role(self): + role_arn = self.get_variables()["ExistingRoleArn"] + if role_arn: + return role_arn + return self.create_iam_role() + + def create_template(self): + t = self.template + role_arn = self.get_iam_role() + variables = self.get_variables() + + args = {} + version = variables["Version"] + if version: + args["Version"] = version + + # This is a fully qualified stacker name, prefixed with the namespace. + eks_name_tag = self.context.get_fqn(variables["Name"]) + + t.add_resource( + eks.Cluster( + "EksCluster", + Name=eks_name_tag, + RoleArn=role_arn, + ResourcesVpcConfig=eks.ResourcesVpcConfig( + SecurityGroupIds=variables["SecurityGroupIds"].split(","), + SubnetIds=variables["SubnetIds"].split(","), + ), + **args + ) + ) + + # Output the ClusterName and RoleArn, which are useful as inputs for + # EKS worker nodes. 
+ t.add_output(Output("ClusterName", Value=eks_name_tag)) + t.add_output(Output("RoleArn", Value=role_arn)) + + +# This is a stacker re-implementation of Amazon's template defined here: +# https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml +# More docs can be found here: +# https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html + +# This comes straight from that template above, just formatted for Python +max_pods_per_instance = { + "c4.large": 29, + "c4.xlarge": 58, + "c4.2xlarge": 58, + "c4.4xlarge": 234, + "c4.8xlarge": 234, + "c5.large": 29, + "c5.xlarge": 58, + "c5.2xlarge": 58, + "c5.4xlarge": 234, + "c5.9xlarge": 234, + "c5.18xlarge": 737, + "i3.large": 29, + "i3.xlarge": 58, + "i3.2xlarge": 58, + "i3.4xlarge": 234, + "i3.8xlarge": 234, + "i3.16xlarge": 737, + "m3.medium": 12, + "m3.large": 29, + "m3.xlarge": 58, + "m3.2xlarge": 118, + "m4.large": 20, + "m4.xlarge": 58, + "m4.2xlarge": 58, + "m4.4xlarge": 234, + "m4.10xlarge": 234, + "m5.large": 29, + "m5.xlarge": 58, + "m5.2xlarge": 58, + "m5.4xlarge": 234, + "m5.12xlarge": 234, + "m5.24xlarge": 737, + "p2.xlarge": 58, + "p2.8xlarge": 234, + "p2.16xlarge": 234, + "p3.2xlarge": 58, + "p3.8xlarge": 234, + "p3.16xlarge": 234, + "r3.xlarge": 58, + "r3.2xlarge": 58, + "r3.4xlarge": 234, + "r3.8xlarge": 234, + "r4.large": 29, + "r4.xlarge": 58, + "r4.2xlarge": 58, + "r4.4xlarge": 234, + "r4.8xlarge": 234, + "r4.16xlarge": 737, + "t2.small": 8, + "t2.medium": 17, + "t2.large": 35, + "t2.xlarge": 44, + "t2.2xlarge": 44, + "x1.16xlarge": 234, + "x1.32xlarge": 234, +} + + +def create_max_pods_per_node_mapping(t): + mapping = {} + for instance, max_pods in max_pods_per_instance.iteritems(): + mapping[instance] = {"MaxPods": max_pods} + t.add_mapping("MaxPodsPerNode", mapping) + + +# This is copy/pasted from +# https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml +# and updated to call troposphere functions instead of EC2 CFN 
placeholders +def get_launch_config_userdata(cluster_name, instance_type): + if instance_type not in max_pods_per_instance: + raise ValueError("%s is not supported by EKS" % instance_type) + + launch_config_userdata = [ + "#!/bin/bash -xe\n", + "CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki", "\n", + "CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt", "\n", + "MODEL_DIRECTORY_PATH=~/.aws/eks", "\n", + "MODEL_FILE_PATH=$MODEL_DIRECTORY_PATH/eks-2017-11-01.normal.json", "\n", + "mkdir -p $CA_CERTIFICATE_DIRECTORY", "\n", + "mkdir -p $MODEL_DIRECTORY_PATH", "\n", + "curl -o $MODEL_FILE_PATH https://s3-us-west-2.amazonaws.com/amazon-eks/1.10.3/2018-06-05/eks-2017-11-01.normal.json", + "\n", + "aws configure add-model --service-model file://$MODEL_FILE_PATH --service-name eks", "\n", + "aws eks describe-cluster --region=", Ref("AWS::Region"), " --name=", cluster_name, + " --query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint}' > /tmp/describe_cluster_result.json", + "\n", + "cat /tmp/describe_cluster_result.json | grep certificateAuthorityData | awk '{print $2}' | sed 's/[,\"]//g' | base64 -d > $CA_CERTIFICATE_FILE_PATH", + "\n", + "MASTER_ENDPOINT=$(cat /tmp/describe_cluster_result.json | grep endpoint | awk '{print $2}' | sed 's/[,\"]//g')", + "\n", + "INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)", "\n", + "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /var/lib/kubelet/kubeconfig", "\n", + "sed -i s,CLUSTER_NAME,", cluster_name, ",g /var/lib/kubelet/kubeconfig", "\n", + "sed -i s,REGION,", Ref("AWS::Region"), ",g /etc/systemd/system/kubelet.service", "\n", + "sed -i s,MAX_PODS,", FindInMap("MaxPodsPerNode", instance_type, "MaxPods"), + ",g /etc/systemd/system/kubelet.service", "\n", + "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /etc/systemd/system/kubelet.service", "\n", + "sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service", "\n", + "DNS_CLUSTER_IP=10.100.0.10", "\n", + "if 
[[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi", "\n", + "sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service", "\n", + "sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig", "\n", + "sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service", "\n", + "systemctl daemon-reload", "\n", + "systemctl restart kubelet", "\n", + "/opt/aws/bin/cfn-signal -e $? ", + " --stack ", Ref("AWS::StackName"), + " --resource NodeGroup ", + " --region ", Ref("AWS::Region"), "\n" + ] + return Base64(Join("", launch_config_userdata)) + + +class Workers(Blueprint): + VARIABLES = { + "ClusterName": { + "type": str, + "description": "The name of the cluster for workers to join." + }, + "WorkerSecurityGroupId": { + "type": str, + "description": "The security group ID which will contain worker " + "nodes." + }, + "MinInstanceCount": { + "type": int, + "description": "The minimum number of worker nodes for the worker " + "AutoScalingGroup.", + "default": 1, + }, + "MaxInstanceCount": { + "type": int, + "description": "The maximum number of worker nodes for the worker " + "AutoScalingGroup.", + "default": 3, + }, + "DesiredInstanceCount": { + "type": int, + "description": "The desired number of worker nodes for the worker " + "AutoScalingGroup. Defaults to minimum.", + "default": -1, + }, + "WorkerSubnets": { + "type": str, + "description": "A list of subnet ID's where workers will be " + "launched." + }, + "ImageId": { + "type": str, + "description": "Worker node AMI. You need to use one of the AWS " + "provided EKS worker AMI's." + }, + "InstanceType": { + "type": str, + "description": "Instance type for workers.", + "default": "t2.small", + }, + "KeyName": { + "type": str, + "description": "Existing SSH key name for worker access." 
+ }, + "RootVolumeSize": { + "type": int, + "description": "Root volume size in GB.", + "default": 20, + }, + "RootVolumeDevice": { + "type": str, + "description": "The block device name for the root volume. This " + "will depend on instance type and AMI", + "default": "/dev/sda1" + } + } + + def create_node_instance_role(self): + t = self.template + + # The user data below relies on this map being present. + create_max_pods_per_node_mapping(t) + + # This re-creates NodeInstanceRole from Amazon's CFN template + role = t.add_resource( + Role( + "NodeInstanceRole", + AssumeRolePolicyDocument=Policy( + Statement=[ + Statement( + Effect=Allow, + Action=[AssumeRole], + Principal=Principal("Service", ["ec2.amazonaws.com"]) + ) + ] + ), + Path="/", + ManagedPolicyArns=[ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + ] + ) + ) + t.add_output(Output("NodeInstanceRole", Value=role.ref())) + return role + + def create_template(self): + t = self.template + variables = self.get_variables() + + # Create the node instance profile which allows nodes to join the + # EKS Cluster + role = self.create_node_instance_role() + profile = t.add_resource( + InstanceProfile( + "NodeInstanceProfile", + Roles=[role.ref()] + ) + ) + + cluster_name = variables["ClusterName"] + instance_type = variables["InstanceType"] + user_data = get_launch_config_userdata(cluster_name, instance_type) + + # Create the launch configuration with a userdata payload that + # configures each node to connect to + launch_config = t.add_resource( + LaunchConfiguration( + "NodeLaunchConfig", + AssociatePublicIpAddress=False, + IamInstanceProfile=profile.ref(), + ImageId=variables["ImageId"], + InstanceType=variables["InstanceType"], + KeyName=variables["KeyName"], + SecurityGroups=[variables["WorkerSecurityGroupId"]], + UserData=Base64(Join("", user_data)), + BlockDeviceMappings=[ + 
BlockDeviceMapping( + DeviceName=variables["RootVolumeDevice"], + Ebs=EBSBlockDevice( + VolumeSize=variables["RootVolumeSize"], + DeleteOnTermination=True + ) + ), + ], + ) + ) + + min_instances = variables["MinInstanceCount"] + max_instances = variables["MaxInstanceCount"] + desired_instances = variables["DesiredInstanceCount"] + if desired_instances < 0: + desired_instances = min_instances + + # Create the AutoScalingGroup which will manage our instances. It's + # easy to change the worker count be tweaking the limits in here once + # everything is up and running. + t.add_resource( + AutoScalingGroup( + "NodeGroup", + MinSize=min_instances, + MaxSize=max_instances, + DesiredCapacity=desired_instances, + LaunchConfigurationName=launch_config.ref(), + VPCZoneIdentifier=variables["WorkerSubnets"].split(","), + Tags=[ + Tag("Name", "%s-eks-worker" % cluster_name, True), + Tag("kubernetes.io/cluster/%s" % cluster_name, + "owned", True) + ], + UpdatePolicy=UpdatePolicy( + AutoScalingReplacingUpdate=AutoScalingReplacingUpdate( + WillReplace=True, + ), + AutoScalingRollingUpdate=AutoScalingRollingUpdate( + PauseTime='PT5M', + MinInstancesInService="1", + MaxBatchSize='1', + WaitOnResourceSignals=True + ) + ) + ) + ) diff --git a/tests/fixtures/blueprints/eks_cluster.json b/tests/fixtures/blueprints/eks_cluster.json new file mode 100644 index 00000000..bbc8e181 --- /dev/null +++ b/tests/fixtures/blueprints/eks_cluster.json @@ -0,0 +1,63 @@ +{ + "Outputs": { + "ClusterName": { + "Value": "test-k8s" + }, + "RoleArn": { + "Value": { + "Fn::GetAtt": [ + "EksServiceRole", + "Arn" + ] + } + } + }, + "Resources": { + "EksCluster": { + "Properties": { + "Name": "test-k8s", + "ResourcesVpcConfig": { + "SecurityGroupIds": [ + "sg-abc1234" + ], + "SubnetIds": [ + "net-123456", + "net-123457" + ] + }, + "RoleArn": { + "Fn::GetAtt": [ + "EksServiceRole", + "Arn" + ] + } + }, + "Type": "AWS::EKS::Cluster" + }, + "EksServiceRole": { + "Properties": { + "AssumeRolePolicyDocument": { + 
"Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "eks.amazonaws.com" + ] + } + } + ] + }, + "ManagedPolicyArns": [ + "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", + "arn:aws:iam::aws:policy/AmazonEKSServicePolicy" + ], + "Path": "/" + }, + "Type": "AWS::IAM::Role" + } + } +} diff --git a/tests/fixtures/blueprints/eks_workers.json b/tests/fixtures/blueprints/eks_workers.json new file mode 100644 index 00000000..da350870 --- /dev/null +++ b/tests/fixtures/blueprints/eks_workers.json @@ -0,0 +1,376 @@ +{ + "Mappings": { + "MaxPodsPerNode": { + "c4.2xlarge": { + "MaxPods": 58 + }, + "c4.4xlarge": { + "MaxPods": 234 + }, + "c4.8xlarge": { + "MaxPods": 234 + }, + "c4.large": { + "MaxPods": 29 + }, + "c4.xlarge": { + "MaxPods": 58 + }, + "c5.18xlarge": { + "MaxPods": 737 + }, + "c5.2xlarge": { + "MaxPods": 58 + }, + "c5.4xlarge": { + "MaxPods": 234 + }, + "c5.9xlarge": { + "MaxPods": 234 + }, + "c5.large": { + "MaxPods": 29 + }, + "c5.xlarge": { + "MaxPods": 58 + }, + "i3.16xlarge": { + "MaxPods": 737 + }, + "i3.2xlarge": { + "MaxPods": 58 + }, + "i3.4xlarge": { + "MaxPods": 234 + }, + "i3.8xlarge": { + "MaxPods": 234 + }, + "i3.large": { + "MaxPods": 29 + }, + "i3.xlarge": { + "MaxPods": 58 + }, + "m3.2xlarge": { + "MaxPods": 118 + }, + "m3.large": { + "MaxPods": 29 + }, + "m3.medium": { + "MaxPods": 12 + }, + "m3.xlarge": { + "MaxPods": 58 + }, + "m4.10xlarge": { + "MaxPods": 234 + }, + "m4.2xlarge": { + "MaxPods": 58 + }, + "m4.4xlarge": { + "MaxPods": 234 + }, + "m4.large": { + "MaxPods": 20 + }, + "m4.xlarge": { + "MaxPods": 58 + }, + "m5.12xlarge": { + "MaxPods": 234 + }, + "m5.24xlarge": { + "MaxPods": 737 + }, + "m5.2xlarge": { + "MaxPods": 58 + }, + "m5.4xlarge": { + "MaxPods": 234 + }, + "m5.large": { + "MaxPods": 29 + }, + "m5.xlarge": { + "MaxPods": 58 + }, + "p2.16xlarge": { + "MaxPods": 234 + }, + "p2.8xlarge": { + "MaxPods": 234 + }, + "p2.xlarge": { + "MaxPods": 58 + }, + "p3.16xlarge": { + 
"MaxPods": 234 + }, + "p3.2xlarge": { + "MaxPods": 58 + }, + "p3.8xlarge": { + "MaxPods": 234 + }, + "r3.2xlarge": { + "MaxPods": 58 + }, + "r3.4xlarge": { + "MaxPods": 234 + }, + "r3.8xlarge": { + "MaxPods": 234 + }, + "r3.xlarge": { + "MaxPods": 58 + }, + "r4.16xlarge": { + "MaxPods": 737 + }, + "r4.2xlarge": { + "MaxPods": 58 + }, + "r4.4xlarge": { + "MaxPods": 234 + }, + "r4.8xlarge": { + "MaxPods": 234 + }, + "r4.large": { + "MaxPods": 29 + }, + "r4.xlarge": { + "MaxPods": 58 + }, + "t2.2xlarge": { + "MaxPods": 44 + }, + "t2.large": { + "MaxPods": 35 + }, + "t2.medium": { + "MaxPods": 17 + }, + "t2.small": { + "MaxPods": 8 + }, + "t2.xlarge": { + "MaxPods": 44 + }, + "x1.16xlarge": { + "MaxPods": 234 + }, + "x1.32xlarge": { + "MaxPods": 234 + } + } + }, + "Outputs": { + "NodeInstanceRole": { + "Value": { + "Ref": "NodeInstanceRole" + } + } + }, + "Resources": { + "NodeGroup": { + "Properties": { + "DesiredCapacity": 1, + "LaunchConfigurationName": { + "Ref": "NodeLaunchConfig" + }, + "MaxSize": 3, + "MinSize": 1, + "Tags": [ + { + "Key": "Name", + "PropagateAtLaunch": true, + "Value": "test-k8s-eks-worker" + }, + { + "Key": "kubernetes.io/cluster/test-k8s", + "PropagateAtLaunch": true, + "Value": "owned" + } + ], + "VPCZoneIdentifier": [ + "net-123456", + "net-123457" + ] + }, + "Type": "AWS::AutoScaling::AutoScalingGroup", + "UpdatePolicy": { + "AutoScalingReplacingUpdate": { + "WillReplace": "true" + }, + "AutoScalingRollingUpdate": { + "MaxBatchSize": "1", + "MinInstancesInService": "1", + "PauseTime": "PT5M", + "WaitOnResourceSignals": "true" + } + } + }, + "NodeInstanceProfile": { + "Properties": { + "Roles": [ + { + "Ref": "NodeInstanceRole" + } + ] + }, + "Type": "AWS::IAM::InstanceProfile" + }, + "NodeInstanceRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + } + } + ] + }, + "ManagedPolicyArns": [ + 
"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + ], + "Path": "/" + }, + "Type": "AWS::IAM::Role" + }, + "NodeLaunchConfig": { + "Properties": { + "AssociatePublicIpAddress": "false", + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "DeleteOnTermination": "true", + "VolumeSize": 20 + } + } + ], + "IamInstanceProfile": { + "Ref": "NodeInstanceProfile" + }, + "ImageId": "ami-73a6e20b", + "InstanceType": "t2.small", + "KeyName": "id_rsa_aws", + "SecurityGroups": [ + "sg-def5678" + ], + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash -xe\n", + "CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki", + "\n", + "CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt", + "\n", + "MODEL_DIRECTORY_PATH=~/.aws/eks", + "\n", + "MODEL_FILE_PATH=$MODEL_DIRECTORY_PATH/eks-2017-11-01.normal.json", + "\n", + "mkdir -p $CA_CERTIFICATE_DIRECTORY", + "\n", + "mkdir -p $MODEL_DIRECTORY_PATH", + "\n", + "curl -o $MODEL_FILE_PATH https://s3-us-west-2.amazonaws.com/amazon-eks/1.10.3/2018-06-05/eks-2017-11-01.normal.json", + "\n", + "aws configure add-model --service-model file://$MODEL_FILE_PATH --service-name eks", + "\n", + "aws eks describe-cluster --region=", + { + "Ref": "AWS::Region" + }, + " --name=", + "test-k8s", + " --query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint}' > /tmp/describe_cluster_result.json", + "\n", + "cat /tmp/describe_cluster_result.json | grep certificateAuthorityData | awk '{print $2}' | sed 's/[,\"]//g' | base64 -d > $CA_CERTIFICATE_FILE_PATH", + "\n", + "MASTER_ENDPOINT=$(cat /tmp/describe_cluster_result.json | grep endpoint | awk '{print $2}' | sed 's/[,\"]//g')", + "\n", + "INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)", + "\n", + "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g 
/var/lib/kubelet/kubeconfig", + "\n", + "sed -i s,CLUSTER_NAME,", + "test-k8s", + ",g /var/lib/kubelet/kubeconfig", + "\n", + "sed -i s,REGION,", + { + "Ref": "AWS::Region" + }, + ",g /etc/systemd/system/kubelet.service", + "\n", + "sed -i s,MAX_PODS,", + { + "Fn::FindInMap": [ + "MaxPodsPerNode", + "t2.small", + "MaxPods" + ] + }, + ",g /etc/systemd/system/kubelet.service", + "\n", + "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /etc/systemd/system/kubelet.service", + "\n", + "sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service", + "\n", + "DNS_CLUSTER_IP=10.100.0.10", + "\n", + "if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi", + "\n", + "sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service", + "\n", + "sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig", + "\n", + "sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service", + "\n", + "systemctl daemon-reload", + "\n", + "systemctl restart kubelet", + "\n", + "/opt/aws/bin/cfn-signal -e $? 
", + " --stack ", + { + "Ref": "AWS::StackName" + }, + " --resource NodeGroup ", + " --region ", + { + "Ref": "AWS::Region" + }, + "\n" + ] + ] + } + } + ] + } + } + }, + "Type": "AWS::AutoScaling::LaunchConfiguration" + } + } +} diff --git a/tests/test_eks.py b/tests/test_eks.py new file mode 100644 index 00000000..dd450a2f --- /dev/null +++ b/tests/test_eks.py @@ -0,0 +1,54 @@ +from stacker.context import Context, Config +from stacker.variables import Variable +from stacker_blueprints.eks import ( + Cluster, + Workers +) +from stacker.blueprints.testutil import BlueprintTestCase + + +class TestCluster(BlueprintTestCase): + def setUp(self): + self.common_variables = { + "Name": "k8s", + "SecurityGroupIds": "sg-abc1234", + "SubnetIds": "net-123456,net-123457", + } + self.ctx = Context(config=Config({'namespace': 'test'})) + + def generate_variables(self, variable_dict=None): + variable_dict = variable_dict or {} + self.common_variables.update(variable_dict) + return [Variable(k, v) for k, v in self.common_variables.items()] + + def test_eks_cluster(self): + bp = Cluster("eks_cluster", self.ctx) + bp.resolve_variables(self.generate_variables()) + bp.create_template() + self.assertRenderedBlueprint(bp) + + +class TestWorkers(BlueprintTestCase): + def setUp(self): + self.common_variables = { + "ClusterName": "test-k8s", + "WorkerSecurityGroupId": "sg-def5678", + "MinInstanceCount": 1, + "MaxInstanceCount": 3, + "WorkerSubnets": "net-123456,net-123457", + "ImageId": "ami-73a6e20b", + "InstanceType": "t2.small", + "KeyName": "id_rsa_aws", + } + self.ctx = Context(config=Config({'namespace': 'test'})) + + def generate_variables(self, variable_dict=None): + variable_dict = variable_dict or {} + self.common_variables.update(variable_dict) + return [Variable(k, v) for k, v in self.common_variables.items()] + + def test_eks_workers(self): + bp = Workers("eks_workers", self.ctx) + bp.resolve_variables(self.generate_variables()) + bp.create_template() + 
self.assertRenderedBlueprint(bp) From 53a9801e65376cdea96292ca48781a1dbfab31ff Mon Sep 17 00:00:00 2001 From: marcinromaszewicz Date: Mon, 16 Jul 2018 22:15:33 -0700 Subject: [PATCH 2/4] Code review changes Made changes per PR requests. --- stacker_blueprints/eks.py | 18 +- tests/fixtures/blueprints/eks_workers.json | 365 ++++++++++----------- tests/test_eks.py | 4 +- 3 files changed, 190 insertions(+), 197 deletions(-) diff --git a/stacker_blueprints/eks.py b/stacker_blueprints/eks.py index aa250fc0..ae7123f7 100644 --- a/stacker_blueprints/eks.py +++ b/stacker_blueprints/eks.py @@ -77,7 +77,7 @@ class Cluster(Blueprint): def create_iam_role(self): eks_service_role_id = "EksServiceRole" t = self.template - t.add_resource( + role = t.add_resource( Role( eks_service_role_id, AssumeRolePolicyDocument=Policy( @@ -97,7 +97,7 @@ def create_iam_role(self): ] ) ) - return GetAtt(eks_service_role_id, "Arn") + return role.GetAtt("Arn") def get_iam_role(self): role_arn = self.get_variables()["ExistingRoleArn"] @@ -204,7 +204,7 @@ def create_template(self): def create_max_pods_per_node_mapping(t): mapping = {} - for instance, max_pods in max_pods_per_instance.iteritems(): + for instance, max_pods in max_pods_per_instance.items(): mapping[instance] = {"MaxPods": max_pods} t.add_mapping("MaxPodsPerNode", mapping) @@ -263,7 +263,7 @@ class Workers(Blueprint): "type": str, "description": "The name of the cluster for workers to join." }, - "WorkerSecurityGroupId": { + "SecurityGroupId": { "type": str, "description": "The security group ID which will contain worker " "nodes." @@ -286,7 +286,7 @@ class Workers(Blueprint): "AutoScalingGroup. Defaults to minimum.", "default": -1, }, - "WorkerSubnets": { + "Subnets": { "type": str, "description": "A list of subnet ID's where workers will be " "launched." 
@@ -376,8 +376,8 @@ def create_template(self): ImageId=variables["ImageId"], InstanceType=variables["InstanceType"], KeyName=variables["KeyName"], - SecurityGroups=[variables["WorkerSecurityGroupId"]], - UserData=Base64(Join("", user_data)), + SecurityGroups=[variables["SecurityGroupId"]], + UserData=user_data, BlockDeviceMappings=[ BlockDeviceMapping( DeviceName=variables["RootVolumeDevice"], @@ -397,7 +397,7 @@ def create_template(self): desired_instances = min_instances # Create the AutoScalingGroup which will manage our instances. It's - # easy to change the worker count be tweaking the limits in here once + # easy to change the worker count by tweaking the limits in here once # everything is up and running. t.add_resource( AutoScalingGroup( @@ -406,7 +406,7 @@ def create_template(self): MaxSize=max_instances, DesiredCapacity=desired_instances, LaunchConfigurationName=launch_config.ref(), - VPCZoneIdentifier=variables["WorkerSubnets"].split(","), + VPCZoneIdentifier=variables["Subnets"].split(","), Tags=[ Tag("Name", "%s-eks-worker" % cluster_name, True), Tag("kubernetes.io/cluster/%s" % cluster_name, diff --git a/tests/fixtures/blueprints/eks_workers.json b/tests/fixtures/blueprints/eks_workers.json index da350870..49cbb9a1 100644 --- a/tests/fixtures/blueprints/eks_workers.json +++ b/tests/fixtures/blueprints/eks_workers.json @@ -3,217 +3,217 @@ "MaxPodsPerNode": { "c4.2xlarge": { "MaxPods": 58 - }, + }, "c4.4xlarge": { "MaxPods": 234 - }, + }, "c4.8xlarge": { "MaxPods": 234 - }, + }, "c4.large": { "MaxPods": 29 - }, + }, "c4.xlarge": { "MaxPods": 58 - }, + }, "c5.18xlarge": { "MaxPods": 737 - }, + }, "c5.2xlarge": { "MaxPods": 58 - }, + }, "c5.4xlarge": { "MaxPods": 234 - }, + }, "c5.9xlarge": { "MaxPods": 234 - }, + }, "c5.large": { "MaxPods": 29 - }, + }, "c5.xlarge": { "MaxPods": 58 - }, + }, "i3.16xlarge": { "MaxPods": 737 - }, + }, "i3.2xlarge": { "MaxPods": 58 - }, + }, "i3.4xlarge": { "MaxPods": 234 - }, + }, "i3.8xlarge": { "MaxPods": 234 - }, + }, 
"i3.large": { "MaxPods": 29 - }, + }, "i3.xlarge": { "MaxPods": 58 - }, + }, "m3.2xlarge": { "MaxPods": 118 - }, + }, "m3.large": { "MaxPods": 29 - }, + }, "m3.medium": { "MaxPods": 12 - }, + }, "m3.xlarge": { "MaxPods": 58 - }, + }, "m4.10xlarge": { "MaxPods": 234 - }, + }, "m4.2xlarge": { "MaxPods": 58 - }, + }, "m4.4xlarge": { "MaxPods": 234 - }, + }, "m4.large": { "MaxPods": 20 - }, + }, "m4.xlarge": { "MaxPods": 58 - }, + }, "m5.12xlarge": { "MaxPods": 234 - }, + }, "m5.24xlarge": { "MaxPods": 737 - }, + }, "m5.2xlarge": { "MaxPods": 58 - }, + }, "m5.4xlarge": { "MaxPods": 234 - }, + }, "m5.large": { "MaxPods": 29 - }, + }, "m5.xlarge": { "MaxPods": 58 - }, + }, "p2.16xlarge": { "MaxPods": 234 - }, + }, "p2.8xlarge": { "MaxPods": 234 - }, + }, "p2.xlarge": { "MaxPods": 58 - }, + }, "p3.16xlarge": { "MaxPods": 234 - }, + }, "p3.2xlarge": { "MaxPods": 58 - }, + }, "p3.8xlarge": { "MaxPods": 234 - }, + }, "r3.2xlarge": { "MaxPods": 58 - }, + }, "r3.4xlarge": { "MaxPods": 234 - }, + }, "r3.8xlarge": { "MaxPods": 234 - }, + }, "r3.xlarge": { "MaxPods": 58 - }, + }, "r4.16xlarge": { "MaxPods": 737 - }, + }, "r4.2xlarge": { "MaxPods": 58 - }, + }, "r4.4xlarge": { "MaxPods": 234 - }, + }, "r4.8xlarge": { "MaxPods": 234 - }, + }, "r4.large": { "MaxPods": 29 - }, + }, "r4.xlarge": { "MaxPods": 58 - }, + }, "t2.2xlarge": { "MaxPods": 44 - }, + }, "t2.large": { "MaxPods": 35 - }, + }, "t2.medium": { "MaxPods": 17 - }, + }, "t2.small": { "MaxPods": 8 - }, + }, "t2.xlarge": { "MaxPods": 44 - }, + }, "x1.16xlarge": { "MaxPods": 234 - }, + }, "x1.32xlarge": { "MaxPods": 234 } } - }, + }, "Outputs": { "NodeInstanceRole": { "Value": { "Ref": "NodeInstanceRole" } } - }, + }, "Resources": { "NodeGroup": { "Properties": { - "DesiredCapacity": 1, + "DesiredCapacity": 1, "LaunchConfigurationName": { "Ref": "NodeLaunchConfig" - }, - "MaxSize": 3, - "MinSize": 1, + }, + "MaxSize": 3, + "MinSize": 1, "Tags": [ { - "Key": "Name", - "PropagateAtLaunch": true, + "Key": "Name", + 
"PropagateAtLaunch": true, "Value": "test-k8s-eks-worker" - }, + }, { - "Key": "kubernetes.io/cluster/test-k8s", - "PropagateAtLaunch": true, + "Key": "kubernetes.io/cluster/test-k8s", + "PropagateAtLaunch": true, "Value": "owned" } - ], + ], "VPCZoneIdentifier": [ - "net-123456", + "net-123456", "net-123457" ] - }, - "Type": "AWS::AutoScaling::AutoScalingGroup", + }, + "Type": "AWS::AutoScaling::AutoScalingGroup", "UpdatePolicy": { "AutoScalingReplacingUpdate": { "WillReplace": "true" - }, + }, "AutoScalingRollingUpdate": { - "MaxBatchSize": "1", - "MinInstancesInService": "1", - "PauseTime": "PT5M", + "MaxBatchSize": "1", + "MinInstancesInService": "1", + "PauseTime": "PT5M", "WaitOnResourceSignals": "true" } } - }, + }, "NodeInstanceProfile": { "Properties": { "Roles": [ @@ -221,9 +221,9 @@ "Ref": "NodeInstanceRole" } ] - }, + }, "Type": "AWS::IAM::InstanceProfile" - }, + }, "NodeInstanceRole": { "Properties": { "AssumeRolePolicyDocument": { @@ -231,8 +231,8 @@ { "Action": [ "sts:AssumeRole" - ], - "Effect": "Allow", + ], + "Effect": "Allow", "Principal": { "Service": [ "ec2.amazonaws.com" @@ -240,137 +240,130 @@ } } ] - }, + }, "ManagedPolicyArns": [ - "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", - "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - ], + ], "Path": "/" - }, + }, "Type": "AWS::IAM::Role" - }, + }, "NodeLaunchConfig": { "Properties": { - "AssociatePublicIpAddress": "false", + "AssociatePublicIpAddress": "false", "BlockDeviceMappings": [ { - "DeviceName": "/dev/sda1", + "DeviceName": "/dev/sda1", "Ebs": { - "DeleteOnTermination": "true", + "DeleteOnTermination": "true", "VolumeSize": 20 } } - ], + ], "IamInstanceProfile": { "Ref": "NodeInstanceProfile" - }, - "ImageId": "ami-73a6e20b", - "InstanceType": "t2.small", - "KeyName": "id_rsa_aws", + }, + "ImageId": "ami-73a6e20b", + 
"InstanceType": "t2.small", + "KeyName": "id_rsa_aws", "SecurityGroups": [ "sg-def5678" - ], + ], "UserData": { "Fn::Base64": { "Fn::Join": [ - "", - { - "Fn::Base64": { - "Fn::Join": [ - "", - [ - "#!/bin/bash -xe\n", - "CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki", - "\n", - "CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt", - "\n", - "MODEL_DIRECTORY_PATH=~/.aws/eks", - "\n", - "MODEL_FILE_PATH=$MODEL_DIRECTORY_PATH/eks-2017-11-01.normal.json", - "\n", - "mkdir -p $CA_CERTIFICATE_DIRECTORY", - "\n", - "mkdir -p $MODEL_DIRECTORY_PATH", - "\n", - "curl -o $MODEL_FILE_PATH https://s3-us-west-2.amazonaws.com/amazon-eks/1.10.3/2018-06-05/eks-2017-11-01.normal.json", - "\n", - "aws configure add-model --service-model file://$MODEL_FILE_PATH --service-name eks", - "\n", - "aws eks describe-cluster --region=", - { - "Ref": "AWS::Region" - }, - " --name=", - "test-k8s", - " --query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint}' > /tmp/describe_cluster_result.json", - "\n", - "cat /tmp/describe_cluster_result.json | grep certificateAuthorityData | awk '{print $2}' | sed 's/[,\"]//g' | base64 -d > $CA_CERTIFICATE_FILE_PATH", - "\n", - "MASTER_ENDPOINT=$(cat /tmp/describe_cluster_result.json | grep endpoint | awk '{print $2}' | sed 's/[,\"]//g')", - "\n", - "INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)", - "\n", - "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /var/lib/kubelet/kubeconfig", - "\n", - "sed -i s,CLUSTER_NAME,", - "test-k8s", - ",g /var/lib/kubelet/kubeconfig", - "\n", - "sed -i s,REGION,", - { - "Ref": "AWS::Region" - }, - ",g /etc/systemd/system/kubelet.service", - "\n", - "sed -i s,MAX_PODS,", - { - "Fn::FindInMap": [ - "MaxPodsPerNode", - "t2.small", - "MaxPods" - ] - }, - ",g /etc/systemd/system/kubelet.service", - "\n", - "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /etc/systemd/system/kubelet.service", - "\n", - "sed -i s,INTERNAL_IP,$INTERNAL_IP,g 
/etc/systemd/system/kubelet.service", - "\n", - "DNS_CLUSTER_IP=10.100.0.10", - "\n", - "if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi", - "\n", - "sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service", - "\n", - "sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig", - "\n", - "sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service", - "\n", - "systemctl daemon-reload", - "\n", - "systemctl restart kubelet", - "\n", - "/opt/aws/bin/cfn-signal -e $? ", - " --stack ", - { - "Ref": "AWS::StackName" - }, - " --resource NodeGroup ", - " --region ", - { - "Ref": "AWS::Region" - }, - "\n" - ] + "", + [ + "#!/bin/bash -xe\n", + "CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki", + "\n", + "CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt", + "\n", + "MODEL_DIRECTORY_PATH=~/.aws/eks", + "\n", + "MODEL_FILE_PATH=$MODEL_DIRECTORY_PATH/eks-2017-11-01.normal.json", + "\n", + "mkdir -p $CA_CERTIFICATE_DIRECTORY", + "\n", + "mkdir -p $MODEL_DIRECTORY_PATH", + "\n", + "curl -o $MODEL_FILE_PATH https://s3-us-west-2.amazonaws.com/amazon-eks/1.10.3/2018-06-05/eks-2017-11-01.normal.json", + "\n", + "aws configure add-model --service-model file://$MODEL_FILE_PATH --service-name eks", + "\n", + "aws eks describe-cluster --region=", + { + "Ref": "AWS::Region" + }, + " --name=", + "test-k8s", + " --query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint}' > /tmp/describe_cluster_result.json", + "\n", + "cat /tmp/describe_cluster_result.json | grep certificateAuthorityData | awk '{print $2}' | sed 's/[,\"]//g' | base64 -d > $CA_CERTIFICATE_FILE_PATH", + "\n", + "MASTER_ENDPOINT=$(cat /tmp/describe_cluster_result.json | grep endpoint | awk '{print $2}' | sed 's/[,\"]//g')", + "\n", + "INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)", + "\n", + "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g 
/var/lib/kubelet/kubeconfig", + "\n", + "sed -i s,CLUSTER_NAME,", + "test-k8s", + ",g /var/lib/kubelet/kubeconfig", + "\n", + "sed -i s,REGION,", + { + "Ref": "AWS::Region" + }, + ",g /etc/systemd/system/kubelet.service", + "\n", + "sed -i s,MAX_PODS,", + { + "Fn::FindInMap": [ + "MaxPodsPerNode", + "t2.small", + "MaxPods" ] - } - } + }, + ",g /etc/systemd/system/kubelet.service", + "\n", + "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /etc/systemd/system/kubelet.service", + "\n", + "sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service", + "\n", + "DNS_CLUSTER_IP=10.100.0.10", + "\n", + "if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi", + "\n", + "sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service", + "\n", + "sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig", + "\n", + "sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service", + "\n", + "systemctl daemon-reload", + "\n", + "systemctl restart kubelet", + "\n", + "/opt/aws/bin/cfn-signal -e $? 
", + " --stack ", + { + "Ref": "AWS::StackName" + }, + " --resource NodeGroup ", + " --region ", + { + "Ref": "AWS::Region" + }, + "\n" + ] ] } } - }, + }, "Type": "AWS::AutoScaling::LaunchConfiguration" } } -} +} \ No newline at end of file diff --git a/tests/test_eks.py b/tests/test_eks.py index dd450a2f..c805fc3c 100644 --- a/tests/test_eks.py +++ b/tests/test_eks.py @@ -32,10 +32,10 @@ class TestWorkers(BlueprintTestCase): def setUp(self): self.common_variables = { "ClusterName": "test-k8s", - "WorkerSecurityGroupId": "sg-def5678", + "SecurityGroupId": "sg-def5678", "MinInstanceCount": 1, "MaxInstanceCount": 3, - "WorkerSubnets": "net-123456,net-123457", + "Subnets": "net-123456,net-123457", "ImageId": "ami-73a6e20b", "InstanceType": "t2.small", "KeyName": "id_rsa_aws", From 692907bea9c8c4a69db59bff4dfa305b62be5eca Mon Sep 17 00:00:00 2001 From: Michael Barrett Date: Sat, 21 Jul 2018 13:07:59 -0700 Subject: [PATCH 3/4] Some updates based on other comments --- stacker_blueprints/eks.py | 362 ++++++++++++--------- tests/fixtures/blueprints/eks_cluster.json | 60 ++-- tests/fixtures/blueprints/eks_workers.json | 352 ++++---------------- 3 files changed, 311 insertions(+), 463 deletions(-) diff --git a/stacker_blueprints/eks.py b/stacker_blueprints/eks.py index ae7123f7..bab3dc49 100644 --- a/stacker_blueprints/eks.py +++ b/stacker_blueprints/eks.py @@ -1,54 +1,44 @@ -from awacs.aws import ( - Allow, - Statement, - Principal, - Policy -) -from awacs.sts import ( - AssumeRole +from awacs.helpers.trust import ( + get_default_assumerole_policy, + make_simple_assume_policy, ) from troposphere import ( Base64, - FindInMap, - GetAtt, - Join, - Ref, - Output + NoValue, + Output, + Sub, ) from troposphere.autoscaling import ( AutoScalingGroup, LaunchConfiguration, - Tag + Tag, ) from troposphere.ec2 import ( BlockDeviceMapping, - EBSBlockDevice + EBSBlockDevice, ) from troposphere.iam import ( InstanceProfile, - Role + Role, ) from troposphere.policies import ( 
AutoScalingReplacingUpdate, AutoScalingRollingUpdate, - UpdatePolicy + UpdatePolicy, ) from troposphere import eks + from stacker.blueprints.base import Blueprint class Cluster(Blueprint): VARIABLES = { - "Name": { - "type": str, - "description": "The name of the cluster to create.", - }, "ExistingRoleArn": { "type": str, "description": "IAM Role ARN with EKS assume role policies. One " @@ -72,24 +62,32 @@ class Cluster(Blueprint): } } + @property + def existing_role_arn(self): + return self.get_variables()["ExistingRoleArn"] + + @property + def version(self): + return self.get_variables()["Version"] + + @property + def security_group_ids(self): + return self.get_variables()["SecurityGroupIds"] + + @property + def subnet_ids(self): + return self.get_variables()["SubnetIds"] + # This creates an IAM role which EKS requires, as described here: # https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html#eks-create-cluster def create_iam_role(self): - eks_service_role_id = "EksServiceRole" t = self.template - role = t.add_resource( + + policy = make_simple_assume_policy("eks.amazonaws.com") + self.role = t.add_resource( Role( - eks_service_role_id, - AssumeRolePolicyDocument=Policy( - Statement=[ - Statement( - Effect=Allow, - Action=[AssumeRole], - Principal=Principal("Service", - ["eks.amazonaws.com"]) - ) - ] - ), + "Role", + AssumeRolePolicyDocument=policy, Path="/", ManagedPolicyArns=[ "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", @@ -97,43 +95,34 @@ def create_iam_role(self): ] ) ) - return role.GetAtt("Arn") + return self.role.GetAtt("Arn") def get_iam_role(self): - role_arn = self.get_variables()["ExistingRoleArn"] - if role_arn: - return role_arn - return self.create_iam_role() + return self.existing_role_arn or self.create_iam_role() def create_template(self): t = self.template role_arn = self.get_iam_role() - variables = self.get_variables() - - args = {} - version = variables["Version"] - if version: - args["Version"] = version - # This is a 
fully qualified stacker name, prefixed with the namespace. - eks_name_tag = self.context.get_fqn(variables["Name"]) + version = self.version or NoValue - t.add_resource( + self.cluster = t.add_resource( eks.Cluster( - "EksCluster", - Name=eks_name_tag, + "Cluster", RoleArn=role_arn, ResourcesVpcConfig=eks.ResourcesVpcConfig( - SecurityGroupIds=variables["SecurityGroupIds"].split(","), - SubnetIds=variables["SubnetIds"].split(","), + SecurityGroupIds=self.security_group_ids.split(","), + SubnetIds=self.subnet_ids.split(","), ), - **args + Version=version, ) ) - # Output the ClusterName and RoleArn, which are useful as inputs for - # EKS worker nodes. - t.add_output(Output("ClusterName", Value=eks_name_tag)) + t.add_output(Output("ClusterName", Value=self.cluster.Ref())) + t.add_output(Output("ClusterArn", Value=self.cluster.GetAtt("Arn"))) + t.add_output( + Output("ClusterEndpoint", Value=self.cluster.GetAtt("Endpoint")) + ) t.add_output(Output("RoleArn", Value=role_arn)) @@ -143,7 +132,7 @@ def create_template(self): # https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html # This comes straight from that template above, just formatted for Python -max_pods_per_instance = { +MAX_PODS_PER_INSTANCE = { "c4.large": 29, "c4.xlarge": 58, "c4.2xlarge": 58, @@ -202,59 +191,65 @@ def create_template(self): } -def create_max_pods_per_node_mapping(t): - mapping = {} - for instance, max_pods in max_pods_per_instance.items(): - mapping[instance] = {"MaxPods": max_pods} - t.add_mapping("MaxPodsPerNode", mapping) +LAUNCH_CONFIG_USERDATA = """ +#!/bin/bash -xe +CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki +CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt +MODEL_DIRECTORY_PATH=~/.aws/eks +MODEL_FILE_PATH=$MODEL_DIRECTORY_PATH/eks-2017-11-01.normal.json +mkdir -p $CA_CERTIFICATE_DIRECTORY +mkdir -p $MODEL_DIRECTORY_PATH +curl -o $MODEL_FILE_PATH \ + https://s3-us-west-2.amazonaws.com/amazon-eks/1.10.3/2018-06-05/eks-2017-11-01.normal.json +aws configure 
add-model --service-model file://$MODEL_FILE_PATH \
+ --service-name eks
+aws eks describe-cluster --region=${AWS::Region} --name=${ClusterName} \
+ --query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint}' > /tmp/describe_cluster_result.json
+cat /tmp/describe_cluster_result.json | grep certificateAuthorityData | \
+ awk '{print $2}' | sed 's/[,\"]//g' | \
+ base64 -d > $CA_CERTIFICATE_FILE_PATH
+MASTER_ENDPOINT=$(cat /tmp/describe_cluster_result.json | grep endpoint | \
+ awk '{print $2}' | sed 's/[,\"]//g')
+INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
+sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /var/lib/kubelet/kubeconfig
+sed -i s,CLUSTER_NAME,${ClusterName},g /var/lib/kubelet/kubeconfig
+sed -i s,REGION,${AWS::Region},g /etc/systemd/system/kubelet.service
+sed -i s,MAX_PODS,${MaxPods}\
+,g /etc/systemd/system/kubelet.service
+sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /etc/systemd/system/kubelet.service
+sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service
+DNS_CLUSTER_IP=10.100.0.10
+if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi
+sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service
+sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g \
+ /var/lib/kubelet/kubeconfig
+sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g \
+ /etc/systemd/system/kubelet.service
+systemctl daemon-reload
+systemctl restart kubelet
+/opt/aws/bin/cfn-signal -e $? 
\ + --stack ${AWS::StackName} \ + --resource NodeGroup \ + --region ${AWS::Region} +""" # This is copy/pasted from # https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml # and updated to call troposphere functions instead of EC2 CFN placeholders def get_launch_config_userdata(cluster_name, instance_type): - if instance_type not in max_pods_per_instance: + try: + max_pods = MAX_PODS_PER_INSTANCE[instance_type] + except KeyError: raise ValueError("%s is not supported by EKS" % instance_type) - launch_config_userdata = [ - "#!/bin/bash -xe\n", - "CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki", "\n", - "CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt", "\n", - "MODEL_DIRECTORY_PATH=~/.aws/eks", "\n", - "MODEL_FILE_PATH=$MODEL_DIRECTORY_PATH/eks-2017-11-01.normal.json", "\n", - "mkdir -p $CA_CERTIFICATE_DIRECTORY", "\n", - "mkdir -p $MODEL_DIRECTORY_PATH", "\n", - "curl -o $MODEL_FILE_PATH https://s3-us-west-2.amazonaws.com/amazon-eks/1.10.3/2018-06-05/eks-2017-11-01.normal.json", - "\n", - "aws configure add-model --service-model file://$MODEL_FILE_PATH --service-name eks", "\n", - "aws eks describe-cluster --region=", Ref("AWS::Region"), " --name=", cluster_name, - " --query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint}' > /tmp/describe_cluster_result.json", - "\n", - "cat /tmp/describe_cluster_result.json | grep certificateAuthorityData | awk '{print $2}' | sed 's/[,\"]//g' | base64 -d > $CA_CERTIFICATE_FILE_PATH", - "\n", - "MASTER_ENDPOINT=$(cat /tmp/describe_cluster_result.json | grep endpoint | awk '{print $2}' | sed 's/[,\"]//g')", - "\n", - "INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)", "\n", - "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /var/lib/kubelet/kubeconfig", "\n", - "sed -i s,CLUSTER_NAME,", cluster_name, ",g /var/lib/kubelet/kubeconfig", "\n", - "sed -i s,REGION,", Ref("AWS::Region"), ",g /etc/systemd/system/kubelet.service", "\n", - "sed 
-i s,MAX_PODS,", FindInMap("MaxPodsPerNode", instance_type, "MaxPods"), - ",g /etc/systemd/system/kubelet.service", "\n", - "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /etc/systemd/system/kubelet.service", "\n", - "sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service", "\n", - "DNS_CLUSTER_IP=10.100.0.10", "\n", - "if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi", "\n", - "sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service", "\n", - "sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig", "\n", - "sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service", "\n", - "systemctl daemon-reload", "\n", - "systemctl restart kubelet", "\n", - "/opt/aws/bin/cfn-signal -e $? ", - " --stack ", Ref("AWS::StackName"), - " --resource NodeGroup ", - " --region ", Ref("AWS::Region"), "\n" - ] - return Base64(Join("", launch_config_userdata)) + launch_config_userdata = Sub( + LAUNCH_CONFIG_USERDATA, + ClusterName=cluster_name, + MaxPods=max_pods + ) + + return Base64(launch_config_userdata) class Workers(Blueprint): @@ -318,71 +313,115 @@ class Workers(Blueprint): } } + @property + def cluster_name(self): + return self.get_variables()["ClusterName"] + + @property + def security_group_id(self): + return self.get_variables()["SecurityGroupId"] + + @property + def min_instance_count(self): + return self.get_variables()["MinInstanceCount"] + + @property + def max_instance_count(self): + return self.get_variables()["MaxInstanceCount"] + + @property + def desired_instance_count(self): + return self.get_variables()["DesiredInstanceCount"] + + @property + def subnets(self): + return self.get_variables()["Subnets"] + + @property + def image_id(self): + return self.get_variables()["ImageId"] + + @property + def instance_type(self): + return self.get_variables()["InstanceType"] + + @property + def key_name(self): + return self.get_variables()["KeyName"] + + 
@property + def root_volume_size(self): + return self.get_variables()["RootVolumeSize"] + + @property + def root_volume_device(self): + return self.get_variables()["RootVolumeDevice"] + def create_node_instance_role(self): t = self.template - # The user data below relies on this map being present. - create_max_pods_per_node_mapping(t) + policy_arns = [ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + ] # This re-creates NodeInstanceRole from Amazon's CFN template - role = t.add_resource( + self.role = t.add_resource( Role( - "NodeInstanceRole", - AssumeRolePolicyDocument=Policy( - Statement=[ - Statement( - Effect=Allow, - Action=[AssumeRole], - Principal=Principal("Service", ["ec2.amazonaws.com"]) - ) - ] - ), + "Role", + AssumeRolePolicyDocument=get_default_assumerole_policy(), Path="/", - ManagedPolicyArns=[ - "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", - "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", - "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - ] + ManagedPolicyArns=policy_arns, ) ) - t.add_output(Output("NodeInstanceRole", Value=role.ref())) - return role + t.add_output(Output("Role", Value=self.role.ref())) - def create_template(self): + def create_instance_profile(self): t = self.template - variables = self.get_variables() - # Create the node instance profile which allows nodes to join the - # EKS Cluster - role = self.create_node_instance_role() - profile = t.add_resource( + self.instance_profile = t.add_resource( InstanceProfile( - "NodeInstanceProfile", - Roles=[role.ref()] + "InstanceProfile", + Roles=[self.role.Ref()] + ) + ) + + t.add_output( + Output("InstanceProfile", Value=self.instance_profile.Ref()) + ) + t.add_output( + Output( + "InstanceProfileArn", + Value=self.instance_profile.GetAtt("Arn") ) ) - cluster_name = variables["ClusterName"] - instance_type = variables["InstanceType"] - user_data = 
get_launch_config_userdata(cluster_name, instance_type) + def create_launch_config(self): + t = self.template + + user_data = get_launch_config_userdata( + self.cluster_name, + self.instance_type + ) # Create the launch configuration with a userdata payload that # configures each node to connect to - launch_config = t.add_resource( + self.launch_config = t.add_resource( LaunchConfiguration( - "NodeLaunchConfig", + "LaunchConfiguration", AssociatePublicIpAddress=False, - IamInstanceProfile=profile.ref(), - ImageId=variables["ImageId"], - InstanceType=variables["InstanceType"], - KeyName=variables["KeyName"], - SecurityGroups=[variables["SecurityGroupId"]], + IamInstanceProfile=self.instance_profile.Ref(), + ImageId=self.image_id, + InstanceType=self.instance_type, + KeyName=self.key_name, + SecurityGroups=[self.security_group_id], UserData=user_data, BlockDeviceMappings=[ BlockDeviceMapping( - DeviceName=variables["RootVolumeDevice"], + DeviceName=self.root_volume_device, Ebs=EBSBlockDevice( - VolumeSize=variables["RootVolumeSize"], + VolumeSize=self.root_volume_size, DeleteOnTermination=True ) ), @@ -390,26 +429,31 @@ def create_template(self): ) ) - min_instances = variables["MinInstanceCount"] - max_instances = variables["MaxInstanceCount"] - desired_instances = variables["DesiredInstanceCount"] + t.add_output( + Output("LaunchConfiguration", Value=self.launch_config.Ref()) + ) + + def create_auto_scaling_group(self): + t = self.template + + desired_instances = self.desired_instance_count if desired_instances < 0: - desired_instances = min_instances + desired_instances = self.min_instance_count # Create the AutoScalingGroup which will manage our instances. It's # easy to change the worker count by tweaking the limits in here once # everything is up and running. 
- t.add_resource( + self.auto_scaling_group = t.add_resource( AutoScalingGroup( - "NodeGroup", - MinSize=min_instances, - MaxSize=max_instances, + "AutoScalingGroup", + MinSize=self.min_instance_count, + MaxSize=self.max_instance_count, DesiredCapacity=desired_instances, - LaunchConfigurationName=launch_config.ref(), - VPCZoneIdentifier=variables["Subnets"].split(","), + LaunchConfigurationName=self.launch_config.Ref(), + VPCZoneIdentifier=self.subnets.split(","), Tags=[ - Tag("Name", "%s-eks-worker" % cluster_name, True), - Tag("kubernetes.io/cluster/%s" % cluster_name, + Tag("Name", "%s-eks-worker" % self.cluster_name, True), + Tag("kubernetes.io/cluster/%s" % self.cluster_name, "owned", True) ], UpdatePolicy=UpdatePolicy( @@ -425,3 +469,13 @@ def create_template(self): ) ) ) + + t.add_output( + Output("AutoScalingGroup", Value=self.auto_scaling_group.Ref()) + ) + + def create_template(self): + self.create_node_instance_role() + self.create_instance_profile() + self.create_launch_config() + self.create_auto_scaling_group() diff --git a/tests/fixtures/blueprints/eks_cluster.json b/tests/fixtures/blueprints/eks_cluster.json index bbc8e181..dcb367da 100644 --- a/tests/fixtures/blueprints/eks_cluster.json +++ b/tests/fixtures/blueprints/eks_cluster.json @@ -1,48 +1,68 @@ { "Outputs": { + "ClusterArn": { + "Value": { + "Fn::GetAtt": [ + "Cluster", + "Arn" + ] + } + }, + "ClusterEndpoint": { + "Value": { + "Fn::GetAtt": [ + "Cluster", + "Endpoint" + ] + } + }, "ClusterName": { - "Value": "test-k8s" - }, + "Value": { + "Ref": "Cluster" + } + }, "RoleArn": { "Value": { "Fn::GetAtt": [ - "EksServiceRole", + "Role", "Arn" ] } } - }, + }, "Resources": { - "EksCluster": { + "Cluster": { "Properties": { - "Name": "test-k8s", "ResourcesVpcConfig": { "SecurityGroupIds": [ "sg-abc1234" - ], + ], "SubnetIds": [ - "net-123456", + "net-123456", "net-123457" ] - }, + }, "RoleArn": { "Fn::GetAtt": [ - "EksServiceRole", + "Role", "Arn" ] + }, + "Version": { + "Ref": "AWS::NoValue" } - 
}, + }, "Type": "AWS::EKS::Cluster" - }, - "EksServiceRole": { + }, + "Role": { "Properties": { "AssumeRolePolicyDocument": { "Statement": [ { "Action": [ "sts:AssumeRole" - ], - "Effect": "Allow", + ], + "Effect": "Allow", "Principal": { "Service": [ "eks.amazonaws.com" @@ -50,14 +70,14 @@ } } ] - }, + }, "ManagedPolicyArns": [ - "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", + "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", "arn:aws:iam::aws:policy/AmazonEKSServicePolicy" - ], + ], "Path": "/" - }, + }, "Type": "AWS::IAM::Role" } } -} +} \ No newline at end of file diff --git a/tests/fixtures/blueprints/eks_workers.json b/tests/fixtures/blueprints/eks_workers.json index 49cbb9a1..290ed50f 100644 --- a/tests/fixtures/blueprints/eks_workers.json +++ b/tests/fixtures/blueprints/eks_workers.json @@ -1,186 +1,40 @@ { - "Mappings": { - "MaxPodsPerNode": { - "c4.2xlarge": { - "MaxPods": 58 - }, - "c4.4xlarge": { - "MaxPods": 234 - }, - "c4.8xlarge": { - "MaxPods": 234 - }, - "c4.large": { - "MaxPods": 29 - }, - "c4.xlarge": { - "MaxPods": 58 - }, - "c5.18xlarge": { - "MaxPods": 737 - }, - "c5.2xlarge": { - "MaxPods": 58 - }, - "c5.4xlarge": { - "MaxPods": 234 - }, - "c5.9xlarge": { - "MaxPods": 234 - }, - "c5.large": { - "MaxPods": 29 - }, - "c5.xlarge": { - "MaxPods": 58 - }, - "i3.16xlarge": { - "MaxPods": 737 - }, - "i3.2xlarge": { - "MaxPods": 58 - }, - "i3.4xlarge": { - "MaxPods": 234 - }, - "i3.8xlarge": { - "MaxPods": 234 - }, - "i3.large": { - "MaxPods": 29 - }, - "i3.xlarge": { - "MaxPods": 58 - }, - "m3.2xlarge": { - "MaxPods": 118 - }, - "m3.large": { - "MaxPods": 29 - }, - "m3.medium": { - "MaxPods": 12 - }, - "m3.xlarge": { - "MaxPods": 58 - }, - "m4.10xlarge": { - "MaxPods": 234 - }, - "m4.2xlarge": { - "MaxPods": 58 - }, - "m4.4xlarge": { - "MaxPods": 234 - }, - "m4.large": { - "MaxPods": 20 - }, - "m4.xlarge": { - "MaxPods": 58 - }, - "m5.12xlarge": { - "MaxPods": 234 - }, - "m5.24xlarge": { - "MaxPods": 737 - }, - "m5.2xlarge": { - "MaxPods": 58 - 
}, - "m5.4xlarge": { - "MaxPods": 234 - }, - "m5.large": { - "MaxPods": 29 - }, - "m5.xlarge": { - "MaxPods": 58 - }, - "p2.16xlarge": { - "MaxPods": 234 - }, - "p2.8xlarge": { - "MaxPods": 234 - }, - "p2.xlarge": { - "MaxPods": 58 - }, - "p3.16xlarge": { - "MaxPods": 234 - }, - "p3.2xlarge": { - "MaxPods": 58 - }, - "p3.8xlarge": { - "MaxPods": 234 - }, - "r3.2xlarge": { - "MaxPods": 58 - }, - "r3.4xlarge": { - "MaxPods": 234 - }, - "r3.8xlarge": { - "MaxPods": 234 - }, - "r3.xlarge": { - "MaxPods": 58 - }, - "r4.16xlarge": { - "MaxPods": 737 - }, - "r4.2xlarge": { - "MaxPods": 58 - }, - "r4.4xlarge": { - "MaxPods": 234 - }, - "r4.8xlarge": { - "MaxPods": 234 - }, - "r4.large": { - "MaxPods": 29 - }, - "r4.xlarge": { - "MaxPods": 58 - }, - "t2.2xlarge": { - "MaxPods": 44 - }, - "t2.large": { - "MaxPods": 35 - }, - "t2.medium": { - "MaxPods": 17 - }, - "t2.small": { - "MaxPods": 8 - }, - "t2.xlarge": { - "MaxPods": 44 - }, - "x1.16xlarge": { - "MaxPods": 234 - }, - "x1.32xlarge": { - "MaxPods": 234 - } - } - }, "Outputs": { - "NodeInstanceRole": { + "AutoScalingGroup": { "Value": { - "Ref": "NodeInstanceRole" + "Ref": "AutoScalingGroup" + } + }, + "InstanceProfile": { + "Value": { + "Ref": "InstanceProfile" + } + }, + "InstanceProfileArn": { + "Value": { + "Fn::GetAtt": [ + "InstanceProfile", + "Arn" + ] + } + }, + "LaunchConfiguration": { + "Value": { + "Ref": "LaunchConfiguration" + } + }, + "Role": { + "Value": { + "Ref": "Role" } } }, "Resources": { - "NodeGroup": { + "AutoScalingGroup": { "Properties": { "DesiredCapacity": 1, "LaunchConfigurationName": { - "Ref": "NodeLaunchConfig" + "Ref": "LaunchConfiguration" }, "MaxSize": 3, "MinSize": 1, @@ -214,43 +68,17 @@ } } }, - "NodeInstanceProfile": { + "InstanceProfile": { "Properties": { "Roles": [ { - "Ref": "NodeInstanceRole" + "Ref": "Role" } ] }, "Type": "AWS::IAM::InstanceProfile" }, - "NodeInstanceRole": { - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": [ - 
"sts:AssumeRole" - ], - "Effect": "Allow", - "Principal": { - "Service": [ - "ec2.amazonaws.com" - ] - } - } - ] - }, - "ManagedPolicyArns": [ - "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", - "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", - "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" - ], - "Path": "/" - }, - "Type": "AWS::IAM::Role" - }, - "NodeLaunchConfig": { + "LaunchConfiguration": { "Properties": { "AssociatePublicIpAddress": "false", "BlockDeviceMappings": [ @@ -263,7 +91,7 @@ } ], "IamInstanceProfile": { - "Ref": "NodeInstanceProfile" + "Ref": "InstanceProfile" }, "ImageId": "ami-73a6e20b", "InstanceType": "t2.small", @@ -273,97 +101,43 @@ ], "UserData": { "Fn::Base64": { - "Fn::Join": [ - "", - [ - "#!/bin/bash -xe\n", - "CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki", - "\n", - "CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt", - "\n", - "MODEL_DIRECTORY_PATH=~/.aws/eks", - "\n", - "MODEL_FILE_PATH=$MODEL_DIRECTORY_PATH/eks-2017-11-01.normal.json", - "\n", - "mkdir -p $CA_CERTIFICATE_DIRECTORY", - "\n", - "mkdir -p $MODEL_DIRECTORY_PATH", - "\n", - "curl -o $MODEL_FILE_PATH https://s3-us-west-2.amazonaws.com/amazon-eks/1.10.3/2018-06-05/eks-2017-11-01.normal.json", - "\n", - "aws configure add-model --service-model file://$MODEL_FILE_PATH --service-name eks", - "\n", - "aws eks describe-cluster --region=", - { - "Ref": "AWS::Region" - }, - " --name=", - "test-k8s", - " --query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint}' > /tmp/describe_cluster_result.json", - "\n", - "cat /tmp/describe_cluster_result.json | grep certificateAuthorityData | awk '{print $2}' | sed 's/[,\"]//g' | base64 -d > $CA_CERTIFICATE_FILE_PATH", - "\n", - "MASTER_ENDPOINT=$(cat /tmp/describe_cluster_result.json | grep endpoint | awk '{print $2}' | sed 's/[,\"]//g')", - "\n", - "INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)", - "\n", - "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g 
/var/lib/kubelet/kubeconfig", - "\n", - "sed -i s,CLUSTER_NAME,", - "test-k8s", - ",g /var/lib/kubelet/kubeconfig", - "\n", - "sed -i s,REGION,", - { - "Ref": "AWS::Region" - }, - ",g /etc/systemd/system/kubelet.service", - "\n", - "sed -i s,MAX_PODS,", - { - "Fn::FindInMap": [ - "MaxPodsPerNode", - "t2.small", - "MaxPods" - ] - }, - ",g /etc/systemd/system/kubelet.service", - "\n", - "sed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /etc/systemd/system/kubelet.service", - "\n", - "sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service", - "\n", - "DNS_CLUSTER_IP=10.100.0.10", - "\n", - "if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi", - "\n", - "sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service", - "\n", - "sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig", - "\n", - "sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service", - "\n", - "systemctl daemon-reload", - "\n", - "systemctl restart kubelet", - "\n", - "/opt/aws/bin/cfn-signal -e $? 
", - " --stack ", - { - "Ref": "AWS::StackName" - }, - " --resource NodeGroup ", - " --region ", - { - "Ref": "AWS::Region" - }, - "\n" - ] + "Fn::Sub": [ + "\n#!/bin/bash -xe\nCA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki\nCA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt\nMODEL_DIRECTORY_PATH=~/.aws/eks\nMODEL_FILE_PATH=$MODEL_DIRECTORY_PATH/eks-2017-11-01.normal.json\nmkdir -p $CA_CERTIFICATE_DIRECTORY\nmkdir -p $MODEL_DIRECTORY_PATH\ncurl -o $MODEL_FILE_PATH https://s3-us-west-2.amazonaws.com/amazon-eks/1.10.3/2018-06-05/eks-2017-11-01.normal.json\naws configure add-model --service-model file://$MODEL_FILE_PATH --service-name eks\naws eks describe-cluster --region=${\"AWS::Region\"} --name=${ClusterName} --query 'cluster.{certificateAuthorityData: certificateAuthority.data, endpoint: endpoint}' > /tmp/describe_cluster_result.json\ncat /tmp/describe_cluster_result.json | grep certificateAuthorityData | awk '{print $2}' | sed 's/[,\"]//g' | base64 -d > $CA_CERTIFICATE_FILE_PATH\nMASTER_ENDPOINT=$(cat /tmp/describe_cluster_result.json | grep endpoint | awk '{print $2}' | sed 's/[,\"]//g')\nINTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)\nsed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /var/lib/kubelet/kubeconfig\nsed -i s,CLUSTER_NAME,\", cluster_name, \",g /var/lib/kubelet/kubeconfig\nsed -i s,REGION,${AWS::Region},g /etc/systemd/system/kubelet.service\nsed -i s,MAX_PODS,${MaxPods}\n,g /etc/systemd/system/kubelet.service\nsed -i s,MASTER_ENDPOINT,$MASTER_ENDPOINT,g /etc/systemd/system/kubelet.service\nsed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service\nDNS_CLUSTER_IP=10.100.0.10\nif [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi\nsed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service\nsed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig\nsed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service\nsystemctl 
daemon-reload\nsystemctl restart kubelet\n/opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} --resource NodeGroup --region ${AWS::Region}\n", + { + "ClusterName": "test-k8s", + "MaxPods": 8 + } ] } } }, "Type": "AWS::AutoScaling::LaunchConfiguration" + }, + "Role": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + } + } + ] + }, + "ManagedPolicyArns": [ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + ], + "Path": "/" + }, + "Type": "AWS::IAM::Role" } } } \ No newline at end of file From d21d193d6e4098f3c1e2aab55ddb4c1ddc4b8645 Mon Sep 17 00:00:00 2001 From: Michael Barrett Date: Sun, 5 Aug 2018 19:55:27 -0700 Subject: [PATCH 4/4] EKS requires troposphere 2.3.1 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e090e19e..4b1df7d0 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ install_requires = [ "python-dateutil<3.0.0", "stacker>=1.0.1", - "troposphere>=2.2.2", + "troposphere>=2.3.1", "awacs>=0.8.0", ]