diff --git a/ansible/inventory/group_vars/kubernetes/k3s.yml b/ansible/inventory/group_vars/kubernetes/kubernetes.yml
similarity index 97%
rename from ansible/inventory/group_vars/kubernetes/k3s.yml
rename to ansible/inventory/group_vars/kubernetes/kubernetes.yml
index 18f60445740..4ca217ea7ff 100644
--- a/ansible/inventory/group_vars/kubernetes/k3s.yml
+++ b/ansible/inventory/group_vars/kubernetes/kubernetes.yml
@@ -8,7 +8,6 @@ k3s_release_version: "v1.27.3+k3s1"
 k3s_install_hard_links: true
 k3s_become: true
-k3s_debug: false
 k3s_etcd_datastore: true
 k3s_use_unsupported_config: true
 k3s_registration_address: "{{ kubevip_address }}"
@@ -26,6 +25,7 @@ k3s_server_manifests_urls:
 # /var/lib/rancher/k3s/server/manifests
 k3s_server_manifests_templates:
   - custom-cilium-helmchart.yaml.j2
+  - custom-coredns-helmchart.yaml.j2
 # /var/lib/rancher/k3s/agent/pod-manifests
 k3s_server_pod_manifests_templates:
   - kube-vip-static-pod.yaml.j2
diff --git a/ansible/inventory/group_vars/kubernetes/os.yml b/ansible/inventory/group_vars/kubernetes/os.yml
deleted file mode 100644
index 9a5bb19c5a4..00000000000
--- a/ansible/inventory/group_vars/kubernetes/os.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-
-# ssh_authorized_keys: []
-
-ubuntu:
-  packages:
-    - hdparm
-    - htop
-    - ipvsadm
-    - lm-sensors
-    - nano
-    - nfs-common
-    - nvme-cli
-    - socat
-    - python3-kubernetes
-    - python3-yaml
diff --git a/ansible/inventory/group_vars/master/k3s.yml b/ansible/inventory/group_vars/master/kubernetes.yml
similarity index 93%
rename from ansible/inventory/group_vars/master/k3s.yml
rename to ansible/inventory/group_vars/master/kubernetes.yml
index 5bcc496bf2d..6bbe206ffff 100644
--- a/ansible/inventory/group_vars/master/k3s.yml
+++ b/ansible/inventory/group_vars/master/kubernetes.yml
@@ -10,6 +10,7 @@ k3s_server:
   docker: false
   flannel-backend: "none" # This needs to be in quotes
   disable:
+    - coredns # Disable coredns - replaced with Helm Chart
    - flannel # Disable flannel - replaced with Cilium
    - local-storage # Disable local-path-provisioner - installed with Flux
    - metrics-server # Disable metrics-server - installed with Flux
@@ -21,10 +22,10 @@ k3s_server:
   write-kubeconfig-mode: "644"
   cluster-cidr: "{{ cluster_cidr }}"
   service-cidr: "{{ service_cidr }}"
+  etcd-expose-metrics: true # Required to monitor etcd with kube-prometheus-stack
   kube-controller-manager-arg:
     - "bind-address=0.0.0.0" # Required to monitor kube-controller-manager with kube-prometheus-stack
   kube-scheduler-arg:
     - "bind-address=0.0.0.0" # Required to monitor kube-scheduler with kube-prometheus-stack
-  etcd-expose-metrics: true # Required to monitor etcd with kube-prometheus-stack
   kube-apiserver-arg:
     - "anonymous-auth=true" # Required for HAProxy health-checks
diff --git a/ansible/playbooks/cluster-installation.yml b/ansible/playbooks/cluster-installation.yml
index 6beb7c07e5d..2d1edb232b6 100644
--- a/ansible/playbooks/cluster-installation.yml
+++ b/ansible/playbooks/cluster-installation.yml
@@ -1,7 +1,6 @@
 ---
-- hosts:
-    - master
-    - worker
+- name: Cluster Installation
+  hosts: all
   become: true
   gather_facts: true
   any_errors_fatal: true
@@ -14,10 +13,10 @@
       check_mode: false
       ansible.builtin.stat:
         path: /etc/rancher/k3s/config.yaml
-      register: k3s_check_installed
+      register: k3s_installed

     - name: Ignore manifests templates and urls if the cluster is already installed
-      when: k3s_check_installed.stat.exists
+      when: k3s_installed.stat.exists
       ansible.builtin.set_fact:
         k3s_server_manifests_templates: []
         k3s_server_manifests_urls: []
@@ -29,168 +28,48 @@
       vars:
         k3s_state: installed
-    - name: Get absolute path to this Git repository
-      delegate_to: localhost
-      become: false
-      run_once: true
-      check_mode: false
-      ansible.builtin.command: git rev-parse --show-toplevel
-      register: repository_path
+    - name: Kubeconfig
+      ansible.builtin.include_tasks: tasks/kubeconfig.yml

-    - name: Copy kubeconfig to the project directory
-      when: k3s_primary_control_node
-      ansible.builtin.fetch:
-        src: /etc/rancher/k3s/k3s.yaml
-        dest: "{{ repository_path.stdout }}/kubeconfig"
-        flat: true
+    - name: Wait for custom manifests to rollout
+      when:
+        - k3s_primary_control_node
+        - (k3s_server_manifests_templates | length > 0
+           or k3s_server_manifests_urls | length > 0)
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        kind: "{{ item.kind }}"
+        name: "{{ item.name }}"
+        namespace: "{{ item.namespace | default('') }}"
+        wait: true
+        wait_sleep: 10
+        wait_timeout: 360
+      loop:
+        - { name: cilium, kind: HelmChart, namespace: kube-system }
+        - { name: coredns, kind: HelmChart, namespace: kube-system }
+        - { name: podmonitors.monitoring.coreos.com, kind: CustomResourceDefinition }
+        - { name: prometheusrules.monitoring.coreos.com, kind: CustomResourceDefinition }
+        - { name: servicemonitors.monitoring.coreos.com, kind: CustomResourceDefinition }

-    - name: Update kubeconfig with the correct load balancer address
-      delegate_to: localhost
-      become: false
-      run_once: true
-      ansible.builtin.replace:
-        path: "{{ repository_path.stdout }}/kubeconfig"
-        regexp: https://127.0.0.1:6443
-        replace: "https://{{ k3s_registration_address }}:6443"
+    - name: Coredns
+      when:
+        - k3s_primary_control_node
+        - (k3s_server_manifests_templates | length > 0
+           or k3s_server_manifests_urls | length > 0)
+      ansible.builtin.include_tasks: tasks/coredns.yml

-    - name: Custom manifests (1)
+    - name: Cilium
       when:
         - k3s_primary_control_node
         - (k3s_server_manifests_templates | length > 0
            or k3s_server_manifests_urls | length > 0)
-      block:
-        - name: Custom manifests (1) | Wait for custom manifests to rollout
-          kubernetes.core.k8s_info:
-            kubeconfig: /etc/rancher/k3s/k3s.yaml
-            kind: "{{ item.kind }}"
-            name: "{{ item.name }}"
-            namespace: "{{ item.namespace | default('') }}"
-            wait: true
-            wait_sleep: 10
-            wait_timeout: 360
-          loop:
-            - name: cilium
-              kind: HelmChart
-              namespace: kube-system
-            - name: podmonitors.monitoring.coreos.com
-              kind: CustomResourceDefinition
-            - name: prometheusrules.monitoring.coreos.com
-              kind: CustomResourceDefinition
-            - name: servicemonitors.monitoring.coreos.com
-              kind: CustomResourceDefinition
-        - name: Custom manifests (1) | Wait for Cilium to rollout
-          kubernetes.core.k8s_info:
-            kubeconfig: /etc/rancher/k3s/k3s.yaml
-            kind: Job
-            name: helm-install-cilium
-            namespace: kube-system
-            wait: true
-            wait_condition:
-              type: Complete
-              status: true
-            wait_timeout: 360
-        # Unmanage and remove the Cilium HelmChart in-order for
-        # flux to take over managing the lifecycle of Cilium
-        - name: Custom manifests (1) | Patch the Cilium HelmChart to unmanage it
-          kubernetes.core.k8s_json_patch:
-            kubeconfig: /etc/rancher/k3s/k3s.yaml
-            name: cilium
-            kind: HelmChart
-            namespace: kube-system
-            patch:
-              - op: add
-                path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged
-                value: "true"
-        - name: Custom manifests (1) | Delete the Cilium HelmChart CR
-          kubernetes.core.k8s:
-            kubeconfig: /etc/rancher/k3s/k3s.yaml
-            name: cilium
-            kind: HelmChart
-            namespace: kube-system
-            state: absent
-        - name: Custom manifests (1) | Check if Cilium HelmChart was deleted
-          kubernetes.core.k8s_info:
-            kubeconfig: /etc/rancher/k3s/k3s.yaml
-            name: cilium
-            kind: HelmChart
-            namespace: kube-system
-          register: cilium_helmchart
-        - name: Custom manifests (1) | Force delete the Cilium HelmChart
-          when: cilium_helmchart.resources | count > 0
-          kubernetes.core.k8s:
-            kubeconfig: /etc/rancher/k3s/k3s.yaml
-            name: cilium
-            kind: HelmChart
-            namespace: kube-system
-            state: patched
-            definition:
-              metadata:
-                finalizers: []
+      ansible.builtin.include_tasks: tasks/cilium.yml

-      # Cleaning up certain manifests from the /var/lib/rancher/k3s/server/manifests directory
-      # is needed because k3s has an awesome "feature" to always re-deploy them when the k3s
-      # service is restarted. Removing them does not uninstall the manifests from your cluster.
-    - name: Custom manifests (2)
+    - name: Cruft
       when: k3s_primary_control_node
-      block:
-        - name: Custom manifests (2) | Get list of custom mantifests
-          ansible.builtin.find:
-            paths: "{{ k3s_server_manifests_dir }}"
-            file_type: file
-            use_regex: true
-            patterns: ["^custom-.*"]
-          register: custom_manifest
-        - name: Custom manifests (2) | Delete custom mantifests
-          ansible.builtin.file:
-            path: "{{ item.path }}"
-            state: absent
-          loop: "{{ custom_manifest.files }}"
-        - name: Custom manifests (2) | Get list of custom addons
-          kubernetes.core.k8s_info:
-            kubeconfig: /etc/rancher/k3s/k3s.yaml
-            kind: Addon
-          register: addons_list
-        - name: Custom manifests (2) | Delete addons
-          kubernetes.core.k8s:
-            kubeconfig: /etc/rancher/k3s/k3s.yaml
-            name: "{{ item.metadata.name }}"
-            kind: Addon
-            namespace: kube-system
-            state: absent
-          loop: "{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}"
+      ansible.builtin.include_tasks: tasks/cruft.yml

-    # https://github.com/k3s-io/k3s/issues/1900
-    - name: Stale containers
-      block:
-        - name: Stale containers | Create systemd unit
-          ansible.builtin.blockinfile:
-            path: /etc/systemd/system/stale-containers.service
-            create: true
-            mode: "0644"
-            block: |
-              [Unit]
-              Description=Clean up stale containers
-              [Service]
-              Type=oneshot
-              ExecStart=/usr/local/bin/k3s crictl rmi --prune > /dev/null 2>&1
-        - name: Stale containers | Create systemd timer
-          ansible.builtin.blockinfile:
-            path: /etc/systemd/system/stale-containers.timer
-            create: true
-            mode: "0644"
-            block: |
-              [Unit]
-              Description=Clean up stale containers
-              [Timer]
-              OnCalendar=weekly
-              AccuracySec=1h
-              Persistent=true
-              RandomizedDelaySec=6000
-              [Install]
-              WantedBy=timers.target
-        - name: Stale containers | Start the systemd timer
-          ansible.builtin.systemd:
-            name: stale-containers.timer
-            enabled: true
-            daemon_reload: true
-            state: started
+    - name: Stale Containers
+      ansible.builtin.include_tasks: tasks/stale_containers.yml
+      vars:
+        stale_containers_state: enabled
diff --git a/ansible/playbooks/cluster-nuke.yml b/ansible/playbooks/cluster-nuke.yml
index d3df8aa78fd..0d5c2cc55c9 100644
--- a/ansible/playbooks/cluster-nuke.yml
+++ b/ansible/playbooks/cluster-nuke.yml
@@ -1,7 +1,6 @@
 ---
-- hosts:
-    - master
-    - worker
+- name: Cluster Nuke
+  hosts: all
   become: true
   gather_facts: true
   any_errors_fatal: true
@@ -22,12 +21,15 @@
       ansible.builtin.pause:
         seconds: 5
   tasks:
-    - name: Stop Kubernetes
-      ansible.builtin.include_role:
-        name: xanmanning.k3s
-        public: true
-      vars:
-        k3s_state: stopped
+    - name: Stop Kubernetes # noqa: ignore-errors
+      ignore_errors: true
+      block:
+        - name: Stop Kubernetes
+          ansible.builtin.include_role:
+            name: xanmanning.k3s
+            public: true
+          vars:
+            k3s_state: stopped

     # https://github.com/k3s-io/docs/blob/main/docs/installation/network-options.md
     - name: Networking
@@ -60,6 +62,11 @@
       vars:
         k3s_state: uninstalled

+    - name: Stale Containers
+      ansible.builtin.include_tasks: tasks/stale_containers.yml
+      vars:
+        stale_containers_state: disabled
+
     - name: Reboot
       ansible.builtin.reboot:
         msg: Rebooting nodes
diff --git a/ansible/playbooks/cluster-prepare.yml b/ansible/playbooks/cluster-prepare.yml
index 3f492a5b5b3..39e9efce46a 100644
--- a/ansible/playbooks/cluster-prepare.yml
+++ b/ansible/playbooks/cluster-prepare.yml
@@ -1,7 +1,6 @@
 ---
-- hosts:
-    - master
-    - worker
+- name: Prepare System
+  hosts: all
   become: true
   gather_facts: true
   any_errors_fatal: true
@@ -28,12 +27,12 @@
             block: |
               127.0.1.1 {{ inventory_hostname }}
         # https://github.com/cilium/cilium/issues/18706
-        - name: Network | Cilium (1)
+        - name: Networking | Cilium (1)
           ansible.builtin.lineinfile:
             dest: /etc/systemd/networkd.conf
             regexp: ManageForeignRoutingPolicyRules
             line: ManageForeignRoutingPolicyRules=no
-        - name: Network Configuration | Cilium (2)
+        - name: Networking | Cilium (2)
           ansible.builtin.lineinfile:
             dest: /etc/systemd/networkd.conf
             regexp: ManageForeignRoutes
@@ -43,12 +42,10 @@
       block:
         - name: Packages | Install required packages
           ansible.builtin.apt:
-            name: "{{ ubuntu.packages | default([]) }}"
+            name: [hdparm, htop, ipvsadm, lm-sensors, nano, nfs-common, nvme-cli,
+              socat, python3-kubernetes, python3-yaml]
             state: present
             update_cache: true
-        - name: Packages | Remove leaf packages
-          ansible.builtin.apt:
-            autoremove: true
         - name: Packages | RasPi packages
           ansible.builtin.apt:
             name: ["linux-modules-extra-raspi"]
@@ -56,14 +53,6 @@
             state: present
           notify: Reboot
           when: "'raspi' in ansible_kernel"

-    - name: User Configuration
-      block:
-        - name: User Configuration | Add additional SSH public keys
-          ansible.posix.authorized_key:
-            user: "{{ ansible_user }}"
-            key: "{{ item }}"
-          loop: "{{ public_ssh_keys | default([]) }}"
-
     - name: System Configuration (1)
       block:
         - name: System Configuration (1) | Disable ufw
@@ -87,13 +76,13 @@
           community.general.modprobe:
             name: "{{ item }}"
             state: present
-          loop: [br_netfilter, ip_vs, ip_vs_rr, overlay, rbd]
+          loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"]
         - name: System Configuration (2) | Enable kernel modules on boot
           ansible.builtin.copy:
-            mode: 0644
+            mode: "0644"
             content: "{{ item }}"
             dest: "/etc/modules-load.d/{{ item }}.conf"
-          loop: [br_netfilter, ip_vs, ip_vs_rr, overlay, rbd]
+          loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"]
         - name: System Configuration (2) | Set sysctls
           ansible.posix.sysctl:
             name: "{{ item.key }}"
@@ -103,15 +92,9 @@
           with_dict: "{{ sysctl_config }}"
           vars:
             sysctl_config:
-              net.ipv4.ip_forward: 1
-              net.ipv4.conf.all.forwarding: 1
-              net.ipv4.conf.all.rp_filter: 0
-              net.ipv4.conf.default.rp_filter: 0
-              net.ipv6.conf.all.forwarding: 1
-              net.bridge.bridge-nf-call-iptables: 1
-              net.bridge.bridge-nf-call-ip6tables: 1
+              fs.inotify.max_queued_events: 65536
               fs.inotify.max_user_watches: 524288
-              fs.inotify.max_user_instances: 512
+              fs.inotify.max_user_instances: 8192
         - name: System Configuration (2) | Disable swap at runtime
           ansible.builtin.command: swapoff -a
           when: ansible_swaptotal_mb > 0
diff --git a/ansible/playbooks/cluster-reboot.yml b/ansible/playbooks/cluster-reboot.yml
index 774f2f8f4da..4adcfe43595 100644
--- a/ansible/playbooks/cluster-reboot.yml
+++ b/ansible/playbooks/cluster-reboot.yml
@@ -1,7 +1,6 @@
 ---
-- hosts:
-    - master
-    - worker
+- name: Reboot
+  hosts: all
   become: true
   gather_facts: true
   any_errors_fatal: true
diff --git a/ansible/playbooks/cluster-upgrade.yml b/ansible/playbooks/cluster-upgrade.yml
index a26df40285d..4c7dfd13e36 100644
--- a/ansible/playbooks/cluster-upgrade.yml
+++ b/ansible/playbooks/cluster-upgrade.yml
@@ -1,7 +1,6 @@
 ---
-- hosts:
-    - master
-    - worker
+- name: Cluster Upgrade
+  hosts: master
   become: true
   gather_facts: true
   any_errors_fatal: true
@@ -18,9 +17,6 @@
         k3s_state: started

     - name: Upgrade kube-vip
-      when:
-        - k3s_control_node is defined
-        - k3s_control_node
       ansible.builtin.template:
         src: templates/kube-vip-static-pod.yaml.j2
         dest: "{{ k3s_server_pod_manifests_dir }}/kube-vip-static-pod.yaml"
diff --git a/ansible/playbooks/files/stale-containers.service b/ansible/playbooks/files/stale-containers.service
new file mode 100644
index 00000000000..21c69cc75c9
--- /dev/null
+++ b/ansible/playbooks/files/stale-containers.service
@@ -0,0 +1,6 @@
+[Unit]
+Description=Stale containers
+
+[Service]
+Type=oneshot
+ExecStart=/bin/sh -c '/usr/local/bin/k3s crictl rmi --prune >/dev/null 2>&1'
diff --git a/ansible/playbooks/files/stale-containers.timer b/ansible/playbooks/files/stale-containers.timer
new file mode 100644
index 00000000000..731885a14dc
--- /dev/null
+++ b/ansible/playbooks/files/stale-containers.timer
@@ -0,0 +1,11 @@
+[Unit]
+Description=Stale containers
+
+[Timer]
+OnCalendar=weekly
+AccuracySec=1h
+Persistent=true
+RandomizedDelaySec=6000
+
+[Install]
+WantedBy=timers.target
diff --git a/ansible/playbooks/tasks/cilium.yml b/ansible/playbooks/tasks/cilium.yml
new file mode 100644
index 00000000000..2d1b722fdd6
--- /dev/null
+++ b/ansible/playbooks/tasks/cilium.yml
@@ -0,0 +1,53 @@
+---
+- name: Cilium
+  block:
+    - name: Cilium | Wait for Cilium to rollout
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: helm-install-cilium
+        kind: Job
+        namespace: kube-system
+        wait: true
+        wait_condition:
+          type: Complete
+          status: true
+        wait_timeout: 360
+
+    - name: Cilium | Patch the Cilium HelmChart to unmanage it
+      kubernetes.core.k8s_json_patch:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: cilium
+        kind: HelmChart
+        namespace: kube-system
+        patch:
+          - op: add
+            path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged
+            value: "true"
+
+    - name: Cilium | Delete the Cilium HelmChart CR
+      kubernetes.core.k8s:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: cilium
+        kind: HelmChart
+        namespace: kube-system
+        state: absent
+
+    - name: Cilium | Check if Cilium HelmChart was deleted
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: cilium
+        kind: HelmChart
+        namespace: kube-system
+      register: cilium_helmchart
+
+    - name: Cilium | Force delete the Cilium HelmChart
+      when: cilium_helmchart.resources | count > 0
+      kubernetes.core.k8s:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: cilium
+        kind: HelmChart
+        namespace: kube-system
+        state: patched
+        definition:
+          metadata:
+            finalizers: []
diff --git a/ansible/playbooks/tasks/coredns.yml b/ansible/playbooks/tasks/coredns.yml
new file mode 100644
index 00000000000..5d46fce88dc
--- /dev/null
+++ b/ansible/playbooks/tasks/coredns.yml
@@ -0,0 +1,53 @@
+---
+- name: Coredns
+  block:
+    - name: Coredns | Wait for Coredns to rollout
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: helm-install-coredns
+        kind: Job
+        namespace: kube-system
+        wait: true
+        wait_condition:
+          type: Complete
+          status: true
+        wait_timeout: 360
+
+    - name: Coredns | Patch the Coredns HelmChart to unmanage it
+      kubernetes.core.k8s_json_patch:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: coredns
+        kind: HelmChart
+        namespace: kube-system
+        patch:
+          - op: add
+            path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged
+            value: "true"
+
+    - name: Coredns | Delete the Coredns HelmChart CR
+      kubernetes.core.k8s:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: coredns
+        kind: HelmChart
+        namespace: kube-system
+        state: absent
+
+    - name: Coredns | Check if Coredns HelmChart was deleted
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: coredns
+        kind: HelmChart
+        namespace: kube-system
+      register: coredns_helmchart
+
+    - name: Coredns | Force delete the Coredns HelmChart
+      when: coredns_helmchart.resources | count > 0
+      kubernetes.core.k8s:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: coredns
+        kind: HelmChart
+        namespace: kube-system
+        state: patched
+        definition:
+          metadata:
+            finalizers: []
diff --git a/ansible/playbooks/tasks/cruft.yml b/ansible/playbooks/tasks/cruft.yml
new file mode 100644
index 00000000000..66ae984f2f2
--- /dev/null
+++ b/ansible/playbooks/tasks/cruft.yml
@@ -0,0 +1,32 @@
+---
+# https://github.com/k3s-io/k3s/issues/1971
+- name: Cruft
+  block:
+    - name: Cruft | Get list of custom manifests
+      ansible.builtin.find:
+        paths: "{{ k3s_server_manifests_dir }}"
+        file_type: file
+        use_regex: true
+        patterns: ["^custom-.*"]
+      register: custom_manifest
+
+    - name: Cruft | Delete custom manifests
+      ansible.builtin.file:
+        path: "{{ item.path }}"
+        state: absent
+      loop: "{{ custom_manifest.files }}"
+
+    - name: Cruft | Get list of custom addons
+      kubernetes.core.k8s_info:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        kind: Addon
+      register: addons_list
+
+    - name: Cruft | Delete addons
+      kubernetes.core.k8s:
+        kubeconfig: /etc/rancher/k3s/k3s.yaml
+        name: "{{ item.metadata.name }}"
+        kind: Addon
+        namespace: kube-system
+        state: absent
+      loop: "{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}"
diff --git a/ansible/playbooks/tasks/kubeconfig.yml b/ansible/playbooks/tasks/kubeconfig.yml
new file mode 100644
index 00000000000..2e3598a8587
--- /dev/null
+++ b/ansible/playbooks/tasks/kubeconfig.yml
@@ -0,0 +1,24 @@
+---
+- name: Get absolute path to this Git repository
+  delegate_to: localhost
+  become: false
+  run_once: true
+  check_mode: false
+  ansible.builtin.command: git rev-parse --show-toplevel
+  register: repository_path
+
+- name: Copy kubeconfig to the project directory
+  when: k3s_primary_control_node
+  ansible.builtin.fetch:
+    src: /etc/rancher/k3s/k3s.yaml
+    dest: "{{ repository_path.stdout }}/kubeconfig"
+    flat: true
+
+- name: Update kubeconfig with the correct load balancer address
+  delegate_to: localhost
+  become: false
+  run_once: true
+  ansible.builtin.replace:
+    path: "{{ repository_path.stdout }}/kubeconfig"
+    regexp: https://127.0.0.1:6443
+    replace: "https://{{ k3s_registration_address }}:6443"
diff --git a/ansible/playbooks/tasks/stale_containers.yml b/ansible/playbooks/tasks/stale_containers.yml
new file mode 100644
index 00000000000..9857d6bce6a
--- /dev/null
+++ b/ansible/playbooks/tasks/stale_containers.yml
@@ -0,0 +1,36 @@
+---
+# https://github.com/k3s-io/k3s/issues/1900
+- name: Enable Stale containers
+  when: stale_containers_state == "enabled"
+  block:
+    - name: Stale containers | Create systemd unit
+      ansible.builtin.copy:
+        src: files/stale-containers.service
+        dest: /etc/systemd/system/stale-containers.service
+        owner: root
+        group: root
+        mode: "0644"
+
+    - name: Stale containers | Create systemd timer
+      ansible.builtin.copy:
+        src: files/stale-containers.timer
+        dest: /etc/systemd/system/stale-containers.timer
+        owner: root
+        group: root
+        mode: "0644"
+
+    - name: Stale containers | Start the systemd timer
+      ansible.builtin.systemd:
+        name: stale-containers.timer
+        enabled: true
+        daemon_reload: true
+        masked: false
+        state: started
+
+- name: Disable Stale containers
+  when: stale_containers_state == "disabled"
+  block:
+    - name: Stale containers | Mask the systemd timer
+      ansible.builtin.systemd:
+        name: stale-containers.timer
+        masked: true
diff --git a/ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2 b/ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2
new file mode 100644
index 00000000000..7eb178695d8
--- /dev/null
+++ b/ansible/playbooks/templates/custom-coredns-helmchart.yaml.j2
@@ -0,0 +1,79 @@
+---
+# https://docs.k3s.io/helm
+apiVersion: helm.cattle.io/v1
+kind: HelmChart
+metadata:
+  name: coredns
+  namespace: kube-system
+spec:
+  repo: https://coredns.github.io/helm
+  chart: coredns
+  # renovate: datasource=helm depName=coredns registryUrl=https://coredns.github.io/helm
+  version: "1.24.1"
+  targetNamespace: kube-system
+  bootstrap: true
+  valuesContent: |-
+    fullnameOverride: coredns
+    image:
+      repository: registry.k8s.io/coredns/coredns
+      tag: v1.10.1
+    replicaCount: 3
+    service:
+      # Choose the 10th IP address from the start of the service-cidr
+      clusterIP: {{ k3s_server['service-cidr'] | ansible.utils.nthhost(10) }}
+    serviceAccount:
+      create: true
+    deployment:
+      annotations:
+        reloader.stakater.com/auto: "true"
+    servers:
+      - zones:
+          - zone: .
+            scheme: dns://
+            use_tcp: true
+        port: 53
+        plugins:
+          - name: log
+          - name: errors
+          - name: health
+            configBlock: |-
+              lameduck 5s
+          - name: ready
+          - name: kubernetes
+            parameters: cluster.local in-addr.arpa ip6.arpa
+            configBlock: |-
+              pods insecure
+              fallthrough in-addr.arpa ip6.arpa
+              ttl 30
+          - name: prometheus
+            parameters: 0.0.0.0:9153
+          - name: forward
+            parameters: . /etc/resolv.conf
+          - name: cache
+            parameters: 30
+          - name: loop
+          - name: reload
+          - name: loadbalance
+    affinity:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+            - matchExpressions:
+                - key: node-role.kubernetes.io/control-plane
+                  operator: Exists
+    tolerations:
+      - key: CriticalAddonsOnly
+        operator: Exists
+      - key: node-role.kubernetes.io/control-plane
+        operator: Exists
+        effect: NoSchedule
+      - key: node-role.kubernetes.io/master
+        operator: Exists
+        effect: NoSchedule
+    topologySpreadConstraints:
+      - maxSkew: 1
+        topologyKey: kubernetes.io/hostname
+        whenUnsatisfiable: DoNotSchedule
+        labelSelector:
+          matchLabels:
+            app.kubernetes.io/instance: coredns
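
Note on the clusterIP value in the template above: ansible.utils.nthhost picks the
nth host address counting from the start of the CIDR, which lines up with the
cluster-dns address k3s reserves for CoreDNS. A minimal sanity-check, assuming the
default k3s service-cidr of 10.43.0.0/16 (this debug task is illustrative only and
is not part of the change):

    - name: Show the CoreDNS service IP
      ansible.builtin.debug:
        msg: "{{ '10.43.0.0/16' | ansible.utils.nthhost(10) }}" # -> 10.43.0.10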