This repository was archived by the owner on Jan 9, 2023. It is now read-only.

Commit 06b43b1

Merge branch 'master' into 198-admin-cidr
2 parents: 2632079 + 98d9c1a

File tree: 30 files changed (+375, −85 lines)


docs/spelling_wordlist.txt

Lines changed: 9 additions & 0 deletions

@@ -66,3 +66,12 @@ prepended
 username
 loopback
 CIDR
+addons
+autoscaler
+prometheus
+ubuntu
+offline
+admin
+plugin
+checklist
+localhost

docs/user-guide.rst

Lines changed: 26 additions & 2 deletions

@@ -150,6 +150,32 @@ The configuration file can be found at ``$HOME/.tarmak/tarmak.yaml`` (default).
 The Pod Security Policy manifests can be found within the tarmak directory at
 ``puppet/modules/kubernetes/templates/pod-security-policy.yaml.erb``
 
+Cluster Autoscaler
+~~~~~~~~~~~~~~~~~~
+
+Tarmak supports deploying `Cluster Autoscaler
+<https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler>`_ when
+spinning up a Kubernetes cluster. The following `tarmak.yaml` snippet shows how
+you would enable Cluster Autoscaler.
+
+.. code-block:: yaml
+
+   kubernetes:
+     clusterAutoscaler:
+       enabled: true
+   ...
+
+The above configuration would deploy Cluster Autoscaler with an image of
+`gcr.io/google_containers/cluster-autoscaler`, using the recommended version
+based on the version of your Kubernetes cluster. The configuration block accepts
+two optional fields, `image` and `version`, allowing you to change these
+defaults. Note that the final image tag used when deploying Cluster Autoscaler
+will be the configured version prepended with the letter `v`.
+
+The current implementation will configure the first instance pool of type worker
+in your cluster configuration to scale between `minCount` and `maxCount`. We
+plan to add support for an arbitrary number of worker instance pools.
+
 Logging
 ~~~~~~~
 
@@ -336,7 +362,6 @@ certificate is valid for ``jenkins.<environment>.<zone>``.
       type: ssd
   ...
 
-
 Tiller
 ~~~~~~
 
@@ -362,7 +387,6 @@ allows to override the deployed version:
 consider Helm's `security best practices
 <https://github.com/kubernetes/helm/blob/master/docs/securing_installation.md>`_.
 
-
 Prometheus
 ~~~~~~~~~~
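To illustrate the two optional fields described in the new section, a fuller `tarmak.yaml` snippet might look like the following. This is a sketch: the `image` value simply repeats the documented default explicitly, and `1.3.0` is a hypothetical version, which would yield the image tag `v1.3.0`.

  kubernetes:
    clusterAutoscaler:
      enabled: true
      image: gcr.io/google_containers/cluster-autoscaler   # documented default
      version: "1.3.0"                                     # hypothetical; deployed as tag v1.3.0
  ...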

pkg/puppet/puppet.go

Lines changed: 21 additions & 0 deletions

@@ -306,11 +306,32 @@ func (p *Puppet) writeHieraData(puppetPath string, cluster interfaces.Cluster) e
         return fmt.Errorf("error writing global hiera config: %s", err)
     }
 
+    // retrieve details for first worker instance pool
+    workerMinCount := 0
+    workerMaxCount := 0
+    workerInstancePoolName := ""
+    if cluster.Config().Kubernetes.ClusterAutoscaler != nil && cluster.Config().Kubernetes.ClusterAutoscaler.Enabled {
+        for _, instancePool := range cluster.InstancePools() {
+            if instancePool.Role().Name() == clusterv1alpha1.KubernetesWorkerRoleName {
+                workerMinCount = instancePool.MinCount()
+                workerMaxCount = instancePool.MaxCount()
+                workerInstancePoolName = instancePool.Name()
+                break
+            }
+        }
+    }
+
     // loop through instance pools
     for _, instancePool := range cluster.InstancePools() {
 
         classes, variables := contentInstancePoolConfig(cluster.Config(), instancePool.Config(), instancePool.Role().Name())
 
+        if instancePool.Role().Name() == clusterv1alpha1.KubernetesMasterRoleName && cluster.Config().Kubernetes.ClusterAutoscaler != nil && cluster.Config().Kubernetes.ClusterAutoscaler.Enabled {
+            variables = append(variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::min_instances: %d`, workerMinCount))
+            variables = append(variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::max_instances: %d`, workerMaxCount))
+            variables = append(variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::instance_pool_name: "%s"`, workerInstancePoolName))
+        }
+
         // classes
         err = p.writeLines(
             filepath.Join(hieraPath, "instance_pools", fmt.Sprintf("%s_classes.yaml", instancePool.Name())), classes,
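The first new hunk picks up the min/max bounds and name of the first worker instance pool; the second injects them as hiera variables for master instances only. As a sketch of the generated output, assuming a hypothetical worker pool named "worker" with `minCount: 3` and `maxCount: 10`, the master pool's variables file would gain:

  kubernetes_addons::cluster_autoscaler::min_instances: 3
  kubernetes_addons::cluster_autoscaler::max_instances: 10
  kubernetes_addons::cluster_autoscaler::instance_pool_name: "worker"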

pkg/tarmak/cluster/cluster.go

Lines changed: 2 additions & 1 deletion

@@ -418,12 +418,13 @@ func (c *Cluster) Variables() map[string]interface{} {
         if ok {
             output[fmt.Sprintf("%s_ami", instancePool.TFName())] = ids
         }
-        output[fmt.Sprintf("%s_instance_count", instancePool.TFName())] = instancePool.Config().MinCount
         if instancePool.Config().AllowCIDRs != nil {
             output[fmt.Sprintf("%s_admin_cidrs", instancePool.TFName())] = instancePool.Config().AllowCIDRs
         } else {
             output[fmt.Sprintf("%s_admin_cidrs", instancePool.TFName())] = c.environment.Config().AdminCIDRs
         }
+        output[fmt.Sprintf("%s_min_instance_count", instancePool.TFName())] = instancePool.Config().MinCount
+        output[fmt.Sprintf("%s_max_instance_count", instancePool.TFName())] = instancePool.Config().MaxCount
     }
 
     // set network cidr
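Each instance pool now exports separate minimum and maximum counts to Terraform in place of the single `_instance_count` variable. For a pool whose `TFName()` is `worker` with `MinCount` 3 and `MaxCount` 10 (hypothetical values), the resulting map entries would be equivalent to:

  worker_min_instance_count: 3
  worker_max_instance_count: 10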

pkg/tarmak/instance_pool/instance_pool.go

Lines changed: 5 additions & 2 deletions

@@ -149,8 +149,11 @@ func (n *InstancePool) RootVolume() interfaces.Volume {
     return n.rootVolume
 }
 
-func (n *InstancePool) Count() int {
-    // TODO: this needs to be replaced by Max/Min
+func (n *InstancePool) MinCount() int {
+    return n.conf.MinCount
+}
+
+func (n *InstancePool) MaxCount() int {
     return n.conf.MaxCount
 }
 

pkg/tarmak/interfaces/interfaces.go

Lines changed: 2 additions & 0 deletions

@@ -249,6 +249,8 @@ type InstancePool interface {
     Volumes() []Volume
     Zones() []string
     Validate() error
+    MinCount() int
+    MaxCount() int
 }
 
 type Volume interface {

puppet/hieradata/common.yaml

Lines changed: 0 additions & 4 deletions

@@ -15,10 +15,6 @@ tarmak::kubernetes_api_url: "https://api.%{::tarmak_cluster}.%{::tarmak_dns_root
 # TODO: This should come from terraform
 tarmak::etcd_instances: 3
 
-# cluster scaler config
-kubernetes_addons::cluster_autoscaler::min_instances: 3
-kubernetes_addons::cluster_autoscaler::max_instances: 10
-
 # point heapster to influxdb
 kubernetes_addons::heapster::sink: influxdb:http://monitoring-influxdb.kube-system:8086

puppet/modules/aws_es_proxy/spec/acceptance/nodesets/default.yml

Lines changed: 1 addition & 1 deletion

@@ -7,7 +7,7 @@ HOSTS:
       - k8s-worker
     platform: el-7-x86_64
     box: centos/7
-    box_version: 1710.01
+    box_version: 1804.02
     hypervisor: vagrant_libvirt
 CONFIG:
   type: foss

puppet/modules/calico/spec/acceptance/nodesets/default.yml

Lines changed: 1 addition & 1 deletion

@@ -5,7 +5,7 @@ HOSTS:
       - master
     platform: el-7-x86_64
     box: centos/7
-    box_version: 1710.01
+    box_version: 1804.02
     hypervisor: vagrant_libvirt
 CONFIG:
   type: foss

puppet/modules/etcd/spec/acceptance/nodesets/3n-cluster.yml

Lines changed: 3 additions & 3 deletions

@@ -6,23 +6,23 @@ HOSTS:
       - master
     platform: el-7-x86_64
     box: centos/7
-    box_version: 1710.01
+    box_version: 1804.02
     hypervisor: vagrant_libvirt
     ip: 10.123.0.11
   etcd2:
     roles:
       - etcd
     platform: el-7-x86_64
     box: centos/7
-    box_version: 1710.01
+    box_version: 1804.02
     hypervisor: vagrant_libvirt
     ip: 10.123.0.12
   etcd3:
     roles:
       - etcd
     platform: el-7-x86_64
     box: centos/7
-    box_version: 1710.01
+    box_version: 1804.02
     hypervisor: vagrant_libvirt
     ip: 10.123.0.13
 CONFIG:
