diff --git a/ansible/roles/haproxy/defaults/main.yml b/ansible/roles/haproxy/defaults/main.yml
new file mode 100644
index 00000000..19224ae9
--- /dev/null
+++ b/ansible/roles/haproxy/defaults/main.yml
@@ -0,0 +1,16 @@
+---
+# HA Proxy cluster API server loadbalancer install mode;
+# Default is 'manifest' options are 'manifest' or 'package'
+haproxy_install_mode: 'manifest'
+
+haproxy_config_dir: '/etc/haproxy'
+
+haproxy_image_repo: 'haproxy'
+haproxy_image_tag: '2.3.5-alpine'
+
+# apiserver loadbalancer
+haproxy_memory_requests: 32M
+haproxy_cpu_requests: 25m
+
+haproxy_keepalive_timeout: 5m
+haproxy_healthcheck_port: 8401
diff --git a/ansible/roles/haproxy/tasks/main.yml b/ansible/roles/haproxy/tasks/main.yml
index cb1d01b5..778dcad4 100644
--- a/ansible/roles/haproxy/tasks/main.yml
+++ b/ansible/roles/haproxy/tasks/main.yml
@@ -1,9 +1,23 @@
 ---
-- name: preflight checks
-  include_tasks: pre_checks.yml
+# This role implements the Software Load Balancing approach described at this location
+# https://github.com/kubernetes/kubeadm/blob/master/docs/ha-considerations.md#options-for-software-load-balancing
+- name: 'preflight checks'
+  ansible.builtin.include_tasks: 'pre_checks.yml'
 
-- name: include install tasks
-  include_tasks: install.yml
+- name: 'configure necessary haproxy sysctl configuration'
+  ansible.builtin.sysctl:
+    name: 'net.ipv4.ip_nonlocal_bind'
+    value: '1'
+    state: 'present'
+    sysctl_file: '/etc/sysctl.d/55-haproxy.conf'
+    sysctl_set: true
 
-- name: include configure tasks
-  include_tasks: configure.yml
+- name: 'packages | configure haproxy on control plane nodes'
+  ansible.builtin.include_tasks: 'packages.yml'
+  when:
+    - haproxy_install_mode == 'package'
+
+- name: 'manifests | configure haproxy as a static pod within the cluster'
+  ansible.builtin.include_tasks: 'manifests.yml'
+  when:
+    - haproxy_install_mode == 'manifest'
diff --git a/ansible/roles/haproxy/tasks/manifests.yml b/ansible/roles/haproxy/tasks/manifests.yml
new file mode 100644
index 00000000..17638321
--- /dev/null
+++ b/ansible/roles/haproxy/tasks/manifests.yml
@@ -0,0 +1,41 @@
+---
+- name: 'manifests | ensure kubernetes manifests directory "{{ cluster_kubernetes_manifests_path }}" exists'
+  ansible.builtin.file:
+    path: '{{ cluster_kubernetes_manifests_path }}'
+    state: 'directory'
+    owner: 'root'
+    group: 'root'
+    # Quoted: a bare 0700 is parsed as the integer 448 by YAML, not an octal mode
+    mode: '0700'
+
+- name: 'manifests | ensure haproxy config directory "{{ haproxy_config_dir }}" exists'
+  ansible.builtin.file:
+    path: '{{ haproxy_config_dir }}'
+    state: 'directory'
+    owner: 'root'
+    group: 'root'
+    mode: '0700'
+
+- name: 'manifests | ensure haproxy configuration file is up to date'
+  ansible.builtin.template:
+    src: 'haproxy.cfg.j2'
+    dest: '{{ haproxy_config_dir }}/haproxy.cfg'
+    owner: 'root'
+    group: 'root'
+    mode: '0644'
+    backup: true
+
+- name: 'manifests | get current checksum of haproxy config file'
+  ansible.builtin.stat:
+    path: '{{ haproxy_config_dir }}/haproxy.cfg'
+    get_attributes: false
+    get_checksum: true
+    get_mime: false
+  register: haproxy_config_stat
+
+- name: 'manifests | write haproxy static pod manifest'
+  ansible.builtin.template:
+    src: 'haproxy.yaml.j2'
+    dest: '{{ cluster_kubernetes_manifests_path }}/haproxy.yaml'
+    owner: 'root'
+    group: 'root'
+    mode: '0644'
diff --git a/ansible/roles/haproxy/tasks/packages.yml b/ansible/roles/haproxy/tasks/packages.yml
new file mode 100644
index 00000000..966c9ac0
--- /dev/null
+++ b/ansible/roles/haproxy/tasks/packages.yml
@@ -0,0 +1,5 @@
+---
+- name: 'haproxy | Update apt-cache if it has not been updated in the last 6 hours (21600 seconds)'
+  ansible.builtin.apt:
+    update_cache: true
+    cache_valid_time: 21600
diff --git a/ansible/roles/haproxy/tasks/pre_checks.yml b/ansible/roles/haproxy/tasks/pre_checks.yml
index c7bf5bea..9fea6a41 100644
--- a/ansible/roles/haproxy/tasks/pre_checks.yml
+++ b/ansible/roles/haproxy/tasks/pre_checks.yml
@@ -1,9 +1,9 @@
 ---
 - name: 'validate variable : keepalived_vip'
-  assert:
+  ansible.builtin.assert:
     that:
       - keepalived_vip | ipaddr
     fail_msg:
-      - "Variable 'keepalived_vip' should be a valid ip address"
+      - 'Variable "keepalived_vip" should be a valid ip address'
       - 'Type is: {{ keepalived_vip | type_debug }}'
-      - "Value is: {{ keepalived_vip | default('undefined') }}"
+      - 'Value is: {{ keepalived_vip | default("undefined") }}'
diff --git a/ansible/roles/haproxy/templates/haproxy.cfg.j2 b/ansible/roles/haproxy/templates/haproxy.cfg.j2
index 71b907cc..5fa292e8 100644
--- a/ansible/roles/haproxy/templates/haproxy.cfg.j2
+++ b/ansible/roles/haproxy/templates/haproxy.cfg.j2
@@ -1,23 +1,59 @@
-####################
-# api-proxy config #
-####################
+{{ ansible_managed | comment }}
 
-frontend front-api-proxy
-    bind {{ keepalived_vip }}:8443
-    bind 127.0.0.1:8443
+# This configuration provides some sensible defaults - modeled on the default configuration found here
+# https://github.com/haproxytech/haproxy-docker-alpine/blob/master/2.3/haproxy.cfg
+global
+    log stdout format raw local0
+    stats socket /var/run/haproxy.stat
+    user haproxy
+    group haproxy
+    chroot /var/empty
+    daemon
+
+defaults
+    mode http
+    log global
+    option httplog
+    option dontlognull
+    option http-server-close
+    option redispatch
+    retries 5
+    timeout http-request 5m
+    timeout queue 5m
+    timeout connect 30s
+    timeout client {{ haproxy_keepalive_timeout }}
+    timeout server 15m
+    timeout http-keep-alive 30s
+    timeout check 30s
+    maxconn 4000
+
+frontend stats
+    bind *:8404
+    mode http
+    stats enable
+    stats uri /stats
+    stats refresh 10s
+    stats admin if TRUE
+
+{% if haproxy_healthcheck_port is defined %}
+frontend healthz
+    bind *:{{ haproxy_healthcheck_port }}
+    mode http
+    monitor-uri /healthz
+{% endif %}
+
+frontend apiserver
+    bind {{ cluster_apiserver_frontend_ip }}:{{ cluster_apiserver_frontend_port }}
     mode tcp
     option tcplog
-    timeout client 4h
-    default_backend kube-api
+    default_backend apiserver
 
-backend kube-api
+backend apiserver
     mode tcp
-    option tcplog
-    option tcp-check
-    timeout connect 10s
-    timeout server 4h
-    balance roundrobin
-    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
-{% for host in groups['masters'] %}
-    server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_host'] }}:6443 check
-{% endfor %}
+    balance leastconn
+    default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100
+    option httpchk GET /healthz
+    http-check expect status 200
+{% for host in groups['controlplane'] %}
+    server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_default_ipv4'].address }}:{{ cluster_apiserver_port }} check check-ssl verify none
+{% endfor %}
diff --git a/ansible/roles/haproxy/templates/haproxy.yaml.j2 b/ansible/roles/haproxy/templates/haproxy.yaml.j2
new file mode 100644
index 00000000..7b15c03b
--- /dev/null
+++ b/ansible/roles/haproxy/templates/haproxy.yaml.j2
@@ -0,0 +1,43 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: haproxy
+  namespace: kube-system
+  creationTimestamp: null
+  labels:
+    component: haproxy
+    tier: control-plane
+  annotations:
+    haproxy-config-checksum: "{{ haproxy_config_stat.stat.checksum }}"
+spec:
+  containers:
+  - image: {{ haproxy_image_repo }}:{{ haproxy_image_tag }}
+    imagePullPolicy: IfNotPresent
+    name: haproxy
+    resources:
+      requests:
+        cpu: {{ haproxy_cpu_requests }}
+        memory: {{ haproxy_memory_requests }}
+{% if haproxy_healthcheck_port is defined %}
+    livenessProbe:
+      httpGet:
+        path: /healthz
+        port: {{ haproxy_healthcheck_port }}
+    readinessProbe:
+      httpGet:
+        path: /healthz
+        port: {{ haproxy_healthcheck_port }}
+{% endif %}
+    volumeMounts:
+    - mountPath: /usr/local/etc/haproxy/
+      name: haproxyconf
+      readOnly: true
+  hostNetwork: true
+  dnsPolicy: ClusterFirstWithHostNet
+  priorityClassName: system-node-critical
+  volumes:
+  - hostPath:
+      path: {{ haproxy_config_dir }}
+    name: haproxyconf
+status: {}