diff --git a/.devcontainer/ci/devcontainer.json b/.devcontainer/ci/devcontainer.json index b38aa2b5186..2064da8c95e 100644 --- a/.devcontainer/ci/devcontainer.json +++ b/.devcontainer/ci/devcontainer.json @@ -19,7 +19,6 @@ "terminal.integrated.defaultProfile.linux": "fish" }, "extensions": [ - "redhat.ansible", "redhat.vscode-yaml" ] } diff --git a/.envrc b/.envrc index 23b9adce852..48f62a73f1e 100644 --- a/.envrc +++ b/.envrc @@ -5,13 +5,6 @@ export SOPS_AGE_KEY_FILE="$(expand_path ./age.key)" PATH_add "$(expand_path ./.venv/bin)" export VIRTUAL_ENV="$(expand_path ./.venv)" export PYTHONDONTWRITEBYTECODE="1" -# Ann Seabelle -export ANSIBLE_COLLECTIONS_PATH=$(expand_path ./.venv/galaxy) -export ANSIBLE_ROLES_PATH=$(expand_path ./.venv/galaxy/ansible_roles) -export ANSIBLE_VARS_ENABLED="host_group_vars" -export ANSIBLE_LOCALHOST_WARNING="False" -export ANSIBLE_INVENTORY_UNPARSED_WARNING="False" -export K8S_AUTH_KUBECONFIG="$(expand_path ./kubeconfig)" # Talos export TALOSCONFIG="$(expand_path ./kubernetes/bootstrap/talos/clusterconfig/talosconfig)" # Bin diff --git a/.github/labeler.yaml b/.github/labeler.yaml index f0a70f568fc..a6f857b0850 100644 --- a/.github/labeler.yaml +++ b/.github/labeler.yaml @@ -1,7 +1,4 @@ --- -area/ansible: - - changed-files: - - any-glob-to-any-file: ansible/**/* area/bootstrap: - changed-files: - any-glob-to-any-file: bootstrap/**/* diff --git a/.github/labels.yaml b/.github/labels.yaml index 88510325d40..6021b69c1d2 100644 --- a/.github/labels.yaml +++ b/.github/labels.yaml @@ -1,15 +1,12 @@ --- # Area -- { name: "area/ansible", color: "0e8a16" } - { name: "area/bootstrap", color: "0e8a16" } - { name: "area/github", color: "0e8a16" } - { name: "area/kubernetes", color: "0e8a16" } - { name: "area/taskfile", color: "0e8a16" } # Distro -- { name: "distro/k3s", color: "ffc300" } - { name: "distro/talos", color: "ffc300" } # Renovate -- { name: "renovate/ansible", color: "027fa0" } - { name: "renovate/container", color: "027fa0" } - { name: "renovate/github-action", color: "027fa0" } - { name: "renovate/github-release", color: "027fa0" } diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 0d94f3c736f..9fde01006c8 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -15,29 +15,22 @@ "schedule": ["on saturday"], "flux": { "fileMatch": [ - "(^|/)ansible/.+\\.ya?ml(?:\\.j2)?$", - "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$", - "(^|/)partials/.+\\.ya?ml(?:\\.j2)?$" + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$" ] }, "helm-values": { "fileMatch": [ - "(^|/)ansible/.+\\.ya?ml(?:\\.j2)?$", - "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$", - "(^|/)partials/.+\\.ya?ml(?:\\.j2)?$" + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$" ] }, "helmfile": { "fileMatch": [ - "(^|/)helmfile\\.ya?ml(?:\\.j2)?$", - "(^|/)partials/.+\\.ya?ml(?:\\.j2)?$" + "(^|/)helmfile\\.ya?ml(?:\\.j2)?$" ] }, "kubernetes": { "fileMatch": [ - "(^|/)ansible/.+\\.ya?ml(?:\\.j2)?$", - "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$", - "(^|/)partials/.+\\.ya?ml(?:\\.j2)?$" + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$" ] }, "kustomize": { @@ -50,11 +43,6 @@ "(^|/)[\\w-]*requirements(-\\w+)?\\.(txt|pip)(?:\\.j2)?$" ] }, - "ansible-galaxy": { - "fileMatch": [ - "(^|/)(galaxy|requirements)(\\.ansible)?\\.ya?ml(?:\\.j2)?$" - ] - }, // commit message topics "commitMessageTopic": "{{depName}}", "commitMessageExtra": "to {{newVersion}}", @@ -92,13 +80,6 @@ }, "separateMinorPatch": true }, - // custom versioning - { - "description": ["Use custom versioning for k3s"], - "matchDatasources": ["github-releases"], - "versioning": 
"regex:^v(?\\d+)\\.(?\\d+)\\.(?\\d+)(?\\+k.s)\\.?(?\\d+)$", - "matchPackagePatterns": ["k3s"] - }, // commit message topics { "matchDatasources": ["helm"], @@ -149,23 +130,6 @@ "semanticCommitType": "fix", "semanticCommitScope": "helm" }, - { - "matchDatasources": ["galaxy", "galaxy-collection"], - "matchUpdateTypes": ["major"], - "commitMessagePrefix": "feat(ansible)!: " - }, - { - "matchDatasources": ["galaxy", "galaxy-collection"], - "matchUpdateTypes": ["minor"], - "semanticCommitType": "feat", - "semanticCommitScope": "ansible" - }, - { - "matchDatasources": ["galaxy", "galaxy-collection"], - "matchUpdateTypes": ["patch"], - "semanticCommitType": "fix", - "semanticCommitScope": "ansible" - }, { "matchDatasources": ["github-releases", "github-tags"], "matchUpdateTypes": ["major"], @@ -221,10 +185,6 @@ "matchDatasources": ["helm"], "addLabels": ["renovate/helm"] }, - { - "matchDatasources": ["galaxy", "galaxy-collection"], - "addLabels": ["renovate/ansible"] - }, { "matchDatasources": ["github-releases", "github-tags"], "addLabels": ["renovate/github-release"] @@ -241,19 +201,12 @@ "description": ["Process custom dependencies"], "fileMatch": [ "(^|/).taskfiles/.+\\.ya?ml$", - "(^|/)ansible/.+\\.ya?ml(?:\\.j2)?$", - "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$", - "(^|/)partials/.+\\.ya?ml(?:\\.j2)?$" + "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$" ], "matchStrings": [ - // # renovate: datasource=github-releases depName=k3s-io/k3s - // k3s_release_version: &version v1.29.0+k3s1 // # renovate: datasource=helm depName=cilium repository=https://helm.cilium.io // version: 1.15.1 - "datasource=(?\\S+) depName=(?\\S+)( repository=(?\\S+))?\\n.+: (&\\S+\\s)?(?\\S+)", - // # renovate: datasource=github-releases depName=rancher/system-upgrade-controller - // https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.2/crd.yaml - "datasource=(?\\S+) depName=(?\\S+)\\n.+/(?(v|\\d)[^/]+)" + "datasource=(?\\S+) depName=(?\\S+)( repository=(?\\S+))?\\n.+: (&\\S+\\s)?(?\\S+)" ], "datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}" } diff --git a/.github/tests/config-k3s-ipv4.yaml b/.github/tests/config-k3s-ipv4.yaml deleted file mode 100644 index 7948fee58d2..00000000000 --- a/.github/tests/config-k3s-ipv4.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -skip_tests: true - -bootstrap_timezone: Etc/UTC -bootstrap_distribution: k3s -bootstrap_node_network: 10.10.10.0/24 -bootstrap_node_default_gateway: 10.10.10.1 -bootstrap_node_inventory: - - name: k8s-controller-0 - address: 10.10.10.100 - controller: true - ssh_user: fake - - name: k8s-worker-0 - address: 10.10.10.101 - controller: false - ssh_user: fake -bootstrap_dns_servers: ["1.1.1.1"] -bootstrap_search_domain: "fake" -bootstrap_pod_network: 10.69.0.0/16 -bootstrap_service_network: 10.96.0.0/16 -bootstrap_controllers_vip: 10.10.10.254 -bootstrap_tls_sans: ["fake"] -bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY -bootstrap_bgp: - enabled: false -bootstrap_github_address: https://github.com/onedr0p/cluster-template -bootstrap_github_branch: main -bootstrap_github_webhook_token: fake -bootstrap_cloudflare: - enabled: true - domain: fake - token: take - acme: - email: fake@example.com - production: false - tunnel: - account_id: fake - id: fake - secret: fake - ingress_vip: 10.10.10.252 - ingress_vip: 10.10.10.251 - gateway_vip: 10.10.10.253 diff --git a/.github/tests/config-k3s-ipv6.yaml b/.github/tests/config-k3s-ipv6.yaml deleted file mode 100644 index 5efa50c6c10..00000000000 --- 
diff --git a/.github/tests/config-k3s-ipv4.yaml b/.github/tests/config-k3s-ipv4.yaml
deleted file mode 100644
index 7948fee58d2..00000000000
--- a/.github/tests/config-k3s-ipv4.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-skip_tests: true
-
-bootstrap_timezone: Etc/UTC
-bootstrap_distribution: k3s
-bootstrap_node_network: 10.10.10.0/24
-bootstrap_node_default_gateway: 10.10.10.1
-bootstrap_node_inventory:
-  - name: k8s-controller-0
-    address: 10.10.10.100
-    controller: true
-    ssh_user: fake
-  - name: k8s-worker-0
-    address: 10.10.10.101
-    controller: false
-    ssh_user: fake
-bootstrap_dns_servers: ["1.1.1.1"]
-bootstrap_search_domain: "fake"
-bootstrap_pod_network: 10.69.0.0/16
-bootstrap_service_network: 10.96.0.0/16
-bootstrap_controllers_vip: 10.10.10.254
-bootstrap_tls_sans: ["fake"]
-bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY
-bootstrap_bgp:
-  enabled: false
-bootstrap_github_address: https://github.com/onedr0p/cluster-template
-bootstrap_github_branch: main
-bootstrap_github_webhook_token: fake
-bootstrap_cloudflare:
-  enabled: true
-  domain: fake
-  token: take
-  acme:
-    email: fake@example.com
-    production: false
-  tunnel:
-    account_id: fake
-    id: fake
-    secret: fake
-    ingress_vip: 10.10.10.252
-  ingress_vip: 10.10.10.251
-  gateway_vip: 10.10.10.253
diff --git a/.github/tests/config-k3s-ipv6.yaml b/.github/tests/config-k3s-ipv6.yaml
deleted file mode 100644
index 5efa50c6c10..00000000000
--- a/.github/tests/config-k3s-ipv6.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-skip_tests: true
-
-bootstrap_timezone: Etc/UTC
-bootstrap_distribution: k3s
-bootstrap_node_network: 10.10.10.0/24
-bootstrap_node_default_gateway: 10.10.10.1
-bootstrap_node_inventory:
-  - name: k8s-controller-0
-    address: 10.10.10.100
-    controller: true
-    ssh_user: fake
-  - name: k8s-worker-0
-    address: 10.10.10.101
-    controller: false
-    ssh_user: fake
-bootstrap_dns_servers: ["1.1.1.1"]
-bootstrap_search_domain: "fake"
-bootstrap_pod_network: 10.42.0.0/16,fd7f:8f5:e87c:a::/64
-bootstrap_service_network: 10.43.0.0/16,fd7f:8f5:e87c:e::/112
-bootstrap_controllers_vip: 10.10.10.254
-bootstrap_tls_sans: ["fake"]
-bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY
-bootstrap_bgp:
-  enabled: false
-bootstrap_github_address: https://github.com/onedr0p/cluster-template
-bootstrap_github_branch: main
-bootstrap_github_webhook_token: fake
-bootstrap_cloudflare:
-  enabled: true
-  domain: fake
-  token: take
-  acme:
-    email: fake@example.com
-    production: false
-  tunnel:
-    account_id: fake
-    id: fake
-    secret: fake
-    ingress_vip: 10.10.10.252
-  ingress_vip: 10.10.10.251
-  gateway_vip: 10.10.10.253
diff --git a/.github/tests/config-talos.yaml b/.github/tests/config-talos.yaml
index 387c606d20e..3df4ce6ddc6 100644
--- a/.github/tests/config-talos.yaml
+++ b/.github/tests/config-talos.yaml
@@ -1,28 +1,26 @@
 ---
 skip_tests: true
 
-bootstrap_timezone: Etc/UTC
-bootstrap_distribution: talos
 boostrap_talos:
-  schematic_id: "df491c50a5acc05b977ef00c32050e1ceb0df746e40b33c643ac8a9bfb7c7263"
+  schematic_id: "376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba"
 bootstrap_node_network: 10.10.10.0/24
 bootstrap_node_default_gateway: 10.10.10.1
 bootstrap_node_inventory:
   - name: k8s-controller-0
     address: 10.10.10.100
     controller: true
-    talos_disk: fake
-    talos_nic: fake
+    disk: fake
+    mac_addr: fake
   - name: k8s-worker-0
     address: 10.10.10.101
     controller: false
-    talos_disk: fake
-    talos_nic: fake
-bootstrap_dns_servers: ["1.1.1.1"]
-bootstrap_search_domain: "fake"
+    disk: fake
+    mac_addr: fake
+bootstrap_dns_servers: ["1.1.1.1", "1.0.0.1"]
+bootstrap_ntp_servers: ["time.cloudflare.com"]
 bootstrap_pod_network: 10.69.0.0/16
 bootstrap_service_network: 10.96.0.0/16
-bootstrap_controllers_vip: 10.10.10.254
+bootstrap_controller_vip: 10.10.10.254
 bootstrap_tls_sans: ["fake"]
 bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY
 bootstrap_bgp:
diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index 1ebbb139314..abf247d034a 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -20,8 +20,6 @@ jobs:
       fail-fast: false
       matrix:
         config-files:
-          - k3s-ipv4
-          - k3s-ipv6
           - talos
     steps:
       - name: Checkout
@@ -93,14 +91,6 @@
           task talos:bootstrap-gensecret
           task talos:bootstrap-genconfig
 
-      - name: Run Ansible tasks
-        if: ${{ startsWith(matrix.config-files, 'k3s') }}
-        shell: bash
-        run: |
-          task ansible:deps force=false
-          task ansible:lint
-          task ansible:list
-
       - name: Run repo clean and reset tasks
         shell: bash
         run: |
diff --git a/.taskfiles/Ansible/Taskfile.yaml b/.taskfiles/Ansible/Taskfile.yaml
deleted file mode 100644
index 02322eafaee..00000000000
--- a/.taskfiles/Ansible/Taskfile.yaml
+++ /dev/null
@@ -1,88 +0,0 @@
----
-# yaml-language-server: $schema=https://taskfile.dev/schema.json
-version: "3"
-
-vars:
-  ANSIBLE_LINT_FILE: "{{.ANSIBLE_DIR}}/.ansible-lint"
-  ANSIBLE_INVENTORY_FILE: "{{.ANSIBLE_DIR}}/inventory/hosts.yaml"
-  ANSIBLE_REQUIREMENTS_FILE: "{{.ANSIBLE_DIR}}/requirements.yaml"
-
ANSIBLE_PIP_REQUIREMENTS_FILE: "{{.ANSIBLE_DIR}}/requirements.txt" - -env: - ANSIBLE_COLLECTIONS_PATH: "{{.VIRTUAL_ENV}}/galaxy" - ANSIBLE_ROLES_PATH: "{{.VIRTUAL_ENV}}/galaxy/ansible_roles" - ANSIBLE_VARS_ENABLED: "host_group_vars" - ANSIBLE_LOCALHOST_WARNING: "False" - ANSIBLE_INVENTORY_UNPARSED_WARNING: "False" - -tasks: - - deps: - desc: Set up Ansible dependencies - deps: [":workstation:venv"] - cmds: - - '{{.VIRTUAL_ENV}}/bin/python3 -m pip install --upgrade --requirement "{{.ANSIBLE_PIP_REQUIREMENTS_FILE}}"' - - '{{.VIRTUAL_ENV}}/bin/ansible-galaxy install --role-file "{{.ANSIBLE_REQUIREMENTS_FILE}}" {{if eq .force "true"}}--force{{end}}' - preconditions: - - { msg: "Missing Ansible requirements file", sh: "test -f {{.ANSIBLE_REQUIREMENTS_FILE}}" } - - { msg: "Missing Pip requirements file", sh: "test -f {{.ANSIBLE_PIP_REQUIREMENTS_FILE}}" } - sources: - - "{{.ANSIBLE_REQUIREMENTS_FILE}}" - - "{{.ANSIBLE_PIP_REQUIREMENTS_FILE}}" - generates: - - "{{.VIRTUAL_ENV}}/bin/ansible" - - "{{.VIRTUAL_ENV}}/bin/ansible-galaxy" - vars: - force: '{{.force | default "true"}}' - - run: - desc: Run an Ansible playbook for configuring a cluster - summary: | - Args: - playbook: Playbook to run (required) - prompt: Run Ansible playbook '{{.playbook}}'... continue? - deps: ["deps"] - cmd: "{{.VIRTUAL_ENV}}/bin/ansible-playbook --inventory {{.ANSIBLE_INVENTORY_FILE}} {{.ANSIBLE_DIR}}/playbooks/{{.playbook}}.yaml {{.CLI_ARGS}}" - requires: - vars: ["playbook"] - preconditions: - - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } - - poweroff: - desc: Shutdown all the k8s nodes - deps: ["deps"] - cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} -a '/usr/bin/systemctl poweroff' --become" - preconditions: - - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } - - list: - desc: List all the hosts - deps: ["deps"] - cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} --list-hosts" - preconditions: - - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } - - ping: - desc: Ping all the hosts - deps: ["deps"] - cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} --one-line -m 'ping'" - preconditions: - - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } - - uptime: - desc: Uptime of all the hosts - deps: ["deps"] - cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} --one-line -a 'uptime'" - preconditions: - - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } - - lint: - desc: Lint Ansible - deps: ["deps"] - cmd: "{{.VIRTUAL_ENV}}/bin/ansible-lint --config-file {{.ANSIBLE_LINT_FILE}} {{.ANSIBLE_DIR}}/**/*.yaml" - preconditions: - - { msg: "Missing Ansible lint file", sh: "test -f {{.ANSIBLE_LINT_FILE}}" } - - .reset: - internal: true - cmd: rm -rf {{.ANSIBLE_DIR}} diff --git a/.taskfiles/Flux/Taskfile.yaml b/.taskfiles/Flux/Taskfile.yaml index 8f0c95ba5bd..0dce3c53b98 100644 --- a/.taskfiles/Flux/Taskfile.yaml +++ b/.taskfiles/Flux/Taskfile.yaml @@ -3,8 +3,6 @@ version: "3" vars: - # renovate: datasource=github-releases depName=prometheus-operator/prometheus-operator - PROMETHEUS_OPERATOR_VERSION: v0.74.0 CLUSTER_SECRET_SOPS_FILE: "{{.KUBERNETES_DIR}}/flux/vars/cluster-secrets.sops.yaml" CLUSTER_SETTINGS_FILE: "{{.KUBERNETES_DIR}}/flux/vars/cluster-settings.yaml" GITHUB_DEPLOY_KEY_FILE: 
"{{.KUBERNETES_DIR}}/bootstrap/flux/github-deploy-key.sops.yaml" @@ -14,18 +12,16 @@ tasks: bootstrap: desc: Bootstrap Flux into a Kubernetes cluster cmds: - - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml - - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml - - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml - - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --kustomize {{.KUBERNETES_DIR}}/bootstrap/flux - cat {{.AGE_FILE}} | kubectl -n flux-system create secret generic sops-age --from-file=age.agekey=/dev/stdin - sops --decrypt {{.CLUSTER_SECRET_SOPS_FILE}} | kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename - - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename {{.CLUSTER_SETTINGS_FILE}} - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --kustomize {{.KUBERNETES_DIR}}/flux/config preconditions: - - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } - - { msg: "Missing Sops Age key file", sh: "test -f {{.AGE_FILE}}" } + - msg: Missing kubeconfig + sh: test -f {{.KUBECONFIG_FILE}} + - msg: Missing Sops Age key file + sh: test -f {{.AGE_FILE}} apply: desc: Apply a Flux Kustomization resource for a cluster @@ -49,20 +45,26 @@ tasks: ks: sh: flux --kubeconfig {{.KUBECONFIG_FILE}} --namespace {{.ns}} get kustomizations $(basename {{.path}}) 2>&1 preconditions: - - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } - - { msg: "Missing Flux Kustomization for app {{.path}}", sh: "test -f {{.KUBERNETES_DIR}}/apps/{{.path}}/ks.yaml" } + - msg: Missing kubeconfig + sh: test -f {{.KUBECONFIG_FILE}} + - msg: Missing Flux Kustomization for app {{.path}} + sh: test -f {{.KUBERNETES_DIR}}/apps/{{.path}}/ks.yaml reconcile: desc: Force update Flux to pull in changes from your Git repository cmd: flux --kubeconfig {{.KUBECONFIG_FILE}} reconcile --namespace flux-system kustomization cluster --with-source preconditions: - - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - msg: Missing kubeconfig + sh: test -f {{.KUBECONFIG_FILE}} github-deploy-key: cmds: - kubectl create namespace flux-system --dry-run=client -o yaml | kubectl --kubeconfig {{.KUBECONFIG_FILE}} apply --filename - - sops --decrypt {{.GITHUB_DEPLOY_KEY_FILE}} | kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename - preconditions: - - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } - - { msg: "Missing Sops Age key file", sh: "test -f {{.AGE_FILE}}" } - - { msg: "Missing Github deploy key file", sh: "test -f {{.GITHUB_DEPLOY_KEY_FILE}}" } + - msg: Missing kubeconfig + sh: test -f {{.KUBECONFIG_FILE}} + - msg: Missing Sops Age key 
file + sh: test -f {{.AGE_FILE}} + - msg: Missing Github deploy key file + sh: test -f {{.GITHUB_DEPLOY_KEY_FILE}} diff --git a/.taskfiles/Kubernetes/Taskfile.yaml b/.taskfiles/Kubernetes/Taskfile.yaml index e4f52e0cbc6..872746e69c4 100644 --- a/.taskfiles/Kubernetes/Taskfile.yaml +++ b/.taskfiles/Kubernetes/Taskfile.yaml @@ -28,7 +28,8 @@ tasks: desc: Validate Kubernetes manifests with kubeconform cmd: bash {{.KUBECONFORM_SCRIPT}} {{.KUBERNETES_DIR}} preconditions: - - { msg: "Missing kubeconform script", sh: "test -f {{.KUBECONFORM_SCRIPT}}" } + - msg: Missing kubeconform script + sh: test -f {{.KUBECONFORM_SCRIPT}} .reset: internal: true diff --git a/.taskfiles/Repository/Taskfile.yaml b/.taskfiles/Repository/Taskfile.yaml index e1e5f68c2ae..9e6bae3667b 100644 --- a/.taskfiles/Repository/Taskfile.yaml +++ b/.taskfiles/Repository/Taskfile.yaml @@ -20,14 +20,15 @@ tasks: # Update renovate.json5 - sed -i {{if eq OS "darwin"}}''{{end}} 's/(..\.j2)\?//g' {{.ROOT_DIR}}/.github/renovate.json5 preconditions: - - { msg: "Missing bootstrap directory", sh: "test -d {{.BOOTSTRAP_DIR}}" } - - { msg: "Missing Renovate config file", sh: "test -f {{.ROOT_DIR}}/.github/renovate.json5" } + - msg: Missing bootstrap directory + sh: test -d {{.BOOTSTRAP_DIR}} + - msg: Missing Renovate config file + sh: test -f {{.ROOT_DIR}}/.github/renovate.json5 reset: desc: Reset templated configuration files prompt: Reset templated configuration files... continue? cmds: - - task: :ansible:.reset - task: :kubernetes:.reset - task: :sops:.reset - task: :talos:.reset diff --git a/.taskfiles/Sops/Taskfile.yaml b/.taskfiles/Sops/Taskfile.yaml index 3739576576d..8aacdd4704a 100644 --- a/.taskfiles/Sops/Taskfile.yaml +++ b/.taskfiles/Sops/Taskfile.yaml @@ -10,8 +10,7 @@ tasks: age-keygen: desc: Initialize Age Key for Sops cmd: age-keygen --output {{.AGE_FILE}} - status: - - test -f "{{.AGE_FILE}}" + status: ["test -f {{.AGE_FILE}}"] encrypt: desc: Encrypt all Kubernetes SOPS secrets that are not already encrypted @@ -33,8 +32,10 @@ tasks: requires: vars: ["file"] preconditions: - - { msg: "Missing Sops config file", sh: "test -f {{.SOPS_CONFIG_FILE}}" } - - { msg: "Missing Sops Age key file", sh: "test -f {{.AGE_FILE}}" } + - msg: Missing Sops config file + sh: test -f {{.SOPS_CONFIG_FILE}} + - msg: Missing Sops Age key file + sh: test -f {{.AGE_FILE}} .reset: internal: true diff --git a/.taskfiles/Talos/Taskfile.yaml b/.taskfiles/Talos/Taskfile.yaml index 97133078e84..8791c0af6f0 100644 --- a/.taskfiles/Talos/Taskfile.yaml +++ b/.taskfiles/Talos/Taskfile.yaml @@ -32,25 +32,28 @@ tasks: - task: :sops:.encrypt-file vars: file: "{{.TALHELPER_SECRET_FILE}}" + status: ["test -f {{.TALHELPER_SECRET_FILE}}"] preconditions: - - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } - status: - - test -f "{{.TALHELPER_SECRET_FILE}}" + - msg: Missing talhelper config file + sh: test -f {{.TALHELPER_CONFIG_FILE}} bootstrap-genconfig: desc: Generate the Talos configs dir: "{{.TALOS_DIR}}" cmd: talhelper genconfig --secret-file {{.TALHELPER_SECRET_FILE}} preconditions: - - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } - - { msg: "Missing talhelper secret file", sh: "test -f {{.TALHELPER_SECRET_FILE}}" } + - msg: Missing talhelper config file + sh: test -f {{.TALHELPER_CONFIG_FILE}} + - msg: Missing talhelper secret file + sh: test -f {{.TALHELPER_SECRET_FILE}} bootstrap-apply: desc: Apply the Talos configs to the nodes dir: "{{.TALOS_DIR}}" cmd: talhelper gencommand apply 
--extra-flags=--insecure | bash preconditions: - - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } + - msg: Missing talhelper config file + sh: test -f {{.TALHELPER_CONFIG_FILE}} bootstrap-install: desc: Install the Talos cluster @@ -60,17 +63,19 @@ tasks: - until talhelper gencommand bootstrap | bash; do sleep 10; done - sleep 10 preconditions: - - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } + - msg: Missing talhelper config file + sh: test -f {{.TALHELPER_CONFIG_FILE}} bootstrap-apps: desc: Bootstrap core apps needed for Talos dir: "{{.TALOS_DIR}}" cmds: - until kubectl --kubeconfig {{.KUBECONFIG_FILE}} wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done - - helmfile --kubeconfig {{.KUBECONFIG_FILE}} --file ./apps/helmfile.yaml apply --skip-diff-on-install --suppress-diff + - helmfile --kubeconfig {{.KUBECONFIG_FILE}} --file helmfile.yaml apply --skip-diff-on-install --suppress-diff - until kubectl --kubeconfig {{.KUBECONFIG_FILE}} wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done preconditions: - - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - msg: Missing kubeconfig + sh: test -f {{.KUBECONFIG_FILE}} upgrade-talos: desc: Upgrade talos on a node @@ -78,7 +83,8 @@ tasks: requires: vars: ["node", "image"] preconditions: - - { msg: "Node not found", sh: "talosctl --nodes {{.node}} get machineconfig" } + - msg: Node not found + sh: talosctl --nodes {{.node}} get machineconfig upgrade-k8s: desc: Upgrade k8s on a node @@ -86,24 +92,19 @@ tasks: requires: vars: ["node", "to"] preconditions: - - { msg: "Node not found", sh: "talosctl --nodes {{.node}} get machineconfig" } + - msg: Node not found + sh: talosctl --nodes {{.node}} get machineconfig fetch-kubeconfig: desc: Generate talos kubeconfig dir: "{{.TALOS_DIR}}" cmd: until talhelper gencommand kubeconfig --extra-flags "{{.ROOT_DIR}} --force" | bash; do sleep 10; done - soft-nuke: + nuke: desc: Resets nodes back to maintenance mode so you can re-deploy again straight after prompt: This will destroy your cluster and reset the nodes back to maintenance mode... continue? dir: "{{.TALOS_DIR}}" - cmd: talhelper gencommand reset --extra-flags "--reboot --system-labels-to-wipe STATE --system-labels-to-wipe EPHEMERAL --graceful=false --wait=false" | bash - - hard-nuke: - desc: Resets nodes back completely and reboots them - prompt: This will destroy your cluster and reset the nodes... continue? 
-    dir: "{{.TALOS_DIR}}"
-    cmd: talhelper gencommand reset --extra-flags "--reboot --graceful=false --wait=false" | bash
+    cmd: talhelper gencommand reset --extra-flags "--reboot{{if eq .CLI_FORCE false}} --system-labels-to-wipe STATE --system-labels-to-wipe EPHEMERAL{{end}} --graceful=false --wait=false" | bash
 
   .reset:
     internal: true
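Note the templating fix above: the space now sits inside the conditional so the flags stay separated whether or not the wipe labels are rendered. The former `soft-nuke`/`hard-nuke` pair collapses into one task whose wipe depth is keyed off Task's `--force` flag (exposed to templates as `CLI_FORCE`). Illustrative usage, assuming the task names above:

```sh
# reset the nodes to maintenance mode, wiping only the STATE and EPHEMERAL partitions
task talos:nuke
# pass Task's --force flag (CLI_FORCE) to wipe the whole Talos installation as well
task talos:nuke --force
```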
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
index c8f112105f4..f4312e64e9f 100644
--- a/.vscode/extensions.json
+++ b/.vscode/extensions.json
@@ -5,7 +5,6 @@
     "fcrespo82.markdown-table-formatter",
     "mikestead.dotenv",
     "mitchdenny.ecdc",
-    "redhat.ansible",
     "signageos.signageos-vscode-sops",
     "will-stone.in-any-case",
     "EditorConfig.editorconfig",
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 8f29572b7a6..579ebc3936b 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,21 +1,10 @@
 {
-  "ansible.ansible.path": ".venv/bin/ansible",
-  "ansible.python.activationScript": ".venv/bin/activate",
-  "ansible.python.interpreterPath": ".venv/bin/python3",
-  "ansible.validation.enabled": true,
-  "ansible.validation.lint.arguments": "-c ansible/.ansible-lint",
-  "ansible.validation.lint.enabled": true,
-  "ansible.validation.lint.path": ".venv/bin/ansible-lint",
   "files.associations": {
     "*.json5": "jsonc",
-    "./ansible/**/*.yaml": "ansible",
-    "./ansible/**/*.sops.yaml": "yaml",
-    "./ansible/**/inventory/**/*.yaml": "yaml",
     "./kubernetes/**/*.sops.toml": "plaintext"
   },
   "sops.defaults.ageKeyFile": "age.key",
   "yaml.schemas": {
-    "ansible": "./ansible/*.yaml",
     "Kubernetes": "./kubernetes/*.yaml"
   },
   "vs-kubernetes": {
diff --git a/README.md b/README.md
index 4a57ecfdd95..f7b43ae9e6c 100644
--- a/README.md
+++ b/README.md
@@ -8,12 +8,12 @@ At a high level this project makes use of [makejinja](https://github.com/mirkole
 The features included will depend on the type of configuration you want to use. There are currently **2 different types** of **configurations** available with this template.
 
-1. **"Flux cluster"** - a Kubernetes distribution of your choosing: [k3s](https://github.com/k3s-io/k3s) or [Talos](https://github.com/siderolabs/talos). Deploys an opinionated implementation of [Flux](https://github.com/fluxcd/flux2) using [GitHub](https://github.com/) as the Git provider and [sops](https://github.com/getsops/sops) to manage secrets.
+1. **"Flux cluster"** - a Kubernetes cluster deployed on top of [Talos Linux](https://github.com/siderolabs/talos) with an opinionated implementation of [Flux](https://github.com/fluxcd/flux2) using [GitHub](https://github.com/) as the Git provider and [sops](https://github.com/getsops/sops) to manage secrets.
 
-    - **Required:** Debian 12 or Talos Linux installed on bare metal (or VMs) and some knowledge of [Containers](https://opencontainers.org/) and [YAML](https://yaml.org/). Some knowledge of [Git](https://git-scm.com/) practices & terminology is also required.
-    - **Components:** [Cilium](https://github.com/cilium/cilium) and [kube-vip](https://github.com/kube-vip/kube-vip) _(k3s)_. [flux](https://github.com/fluxcd/flux2), [cert-manager](https://github.com/cert-manager/cert-manager), [spegel](https://github.com/spegel-org/spegel), [reloader](https://github.com/stakater/Reloader), [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller) _(k3s)_, and [openebs](https://github.com/openebs/openebs).
+    - **Required:** Some knowledge of [Containers](https://opencontainers.org/), [YAML](https://yaml.org/), and [Git](https://git-scm.com/).
+    - **Components:** [flux](https://github.com/fluxcd/flux2), [Cilium](https://github.com/cilium/cilium), [cert-manager](https://github.com/cert-manager/cert-manager), [spegel](https://github.com/spegel-org/spegel), [reloader](https://github.com/stakater/Reloader), and [openebs](https://github.com/openebs/openebs).
 
-3. **"Flux cluster with Cloudflare"** - An addition to "**Flux cluster**" that provides DNS and SSL with [Cloudflare](https://www.cloudflare.com/). [Cloudflare Tunnel](https://www.cloudflare.com/products/tunnel/) is also included to provide external access to certain applications deployed in your cluster.
+2. **"Flux cluster with Cloudflare"** - An addition to "**Flux cluster**" that provides DNS and SSL with [Cloudflare](https://www.cloudflare.com/). [Cloudflare Tunnel](https://www.cloudflare.com/products/tunnel/) is also included to provide external access to certain applications deployed in your cluster.
 
     - **Required:** A Cloudflare account with a domain managed in your Cloudflare account.
     - **Components:** [ingress-nginx](https://github.com/kubernetes/ingress-nginx/), [external-dns](https://github.com/kubernetes-sigs/external-dns) and [cloudflared](https://github.com/cloudflare/cloudflared).
@@ -25,113 +25,24 @@ The features included will depend on the type of configuration you want to use.
 
 ## 💻 Machine Preparation
 
-Hopefully some of this peeked your interests! If you are marching forward, now is a good time to choose whether you will deploy a Kubernetes cluster with [k3s](https://github.com/k3s-io/k3s) or [Talos](https://github.com/siderolabs/talos).
-
 ### System requirements
 
 > [!NOTE]
-> 1. The included behaviour of Talos or k3s is that all nodes are able to run workloads, **including** the controller nodes. **Worker nodes** are therefore **optional**.
+> 1. The included behaviour of Talos is that all nodes are able to run workloads, **including** the controller nodes. **Worker nodes** are therefore **optional**.
 > 2. Do you have 3 or more nodes? It is highly recommended to make 3 of them controller nodes for a highly available control plane.
 > 3. Running the cluster on Proxmox VE? My thoughts and recommendations about that are documented [here](https://onedr0p.github.io/home-ops/notes/proxmox-considerations.html).
 
 | Role    | Cores    | Memory        | System Disk               |
 |---------|----------|---------------|---------------------------|
-| Control | 4 _(6*)_ | 8GB _(24GB*)_ | 100GB _(500GB*)_ SSD/NVMe |
-| Worker  | 4 _(6*)_ | 8GB _(24GB*)_ | 100GB _(500GB*)_ SSD/NVMe |
+| Control | 4 _(6*)_ | 8GB _(24GB*)_ | 120GB _(500GB*)_ SSD/NVMe |
+| Worker  | 4 _(6*)_ | 8GB _(24GB*)_ | 120GB _(500GB*)_ SSD/NVMe |
 | _\* recommended_ |
 
-### Talos
-
-1. Download the latest stable release of Talos from their [GitHub releases](https://github.com/siderolabs/talos/releases). You will want to grab either `metal-amd64.iso` or `metal-rpi_generic-arm64.raw.xz` depending on your system.
-
-2. Take note of the OS drive serial numbers you will need them later on.
-
-3. Flash the iso or raw file to a USB drive and boot to Talos on your nodes with it.
-
-4. Continue on to 🚀 [**Getting Started**](#-getting-started)
-
-### k3s (AMD64)
-
-1. Download the latest stable release of Debian from [here](https://cdimage.debian.org/debian-cd/current/amd64/iso-dvd), then follow [this guide](https://www.linuxtechi.com/how-to-install-debian-12-step-by-step) to get it installed.
Deviations from the guide: - - ```txt - Choose "Guided - use entire disk" - Choose "All files in one partition" - Delete Swap partition - Uncheck all Debian desktop environment options - ``` - -2. [Post install] Remove CD/DVD as apt source - - ```sh - su - - sed -i '/deb cdrom/d' /etc/apt/sources.list - apt update - exit - ``` - -3. [Post install] Enable sudo for your non-root user - - ```sh - su - - apt update - apt install -y sudo - usermod -aG sudo ${username} - echo "${username} ALL=(ALL) NOPASSWD:ALL" | tee /etc/sudoers.d/${username} - exit - newgrp sudo - sudo apt update - ``` - -4. [Post install] Add SSH keys (or use `ssh-copy-id` on the client that is connecting) - - 📍 _First make sure your ssh keys are up-to-date and added to your github account as [instructed](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account)._ - - ```sh - mkdir -m 700 ~/.ssh - sudo apt install -y curl - curl https://github.com/${github_username}.keys > ~/.ssh/authorized_keys - chmod 600 ~/.ssh/authorized_keys - ``` - -### k3s (RasPi4) - -
-<summary>Click here to read about using a RasPi4</summary>
-
-> [!NOTE]
-> 1. It is recommended to have an 8GB RasPi model. Most important is to **boot from an external SSD/NVMe** rather than an SD card. This is [supported natively](https://www.raspberrypi.com/documentation/computers/raspberry-pi.html), however if you have an early model you may need to [update the bootloader](https://www.tomshardware.com/how-to/boot-raspberry-pi-4-usb) first.
-> 2. Check the [power requirements](https://www.raspberrypi.com/documentation/computers/raspberry-pi.html#power-supply) if using a PoE Hat and a SSD/NVMe dongle.
-
-1. Download the latest stable release of Debian from [here](https://raspi.debian.net/tested-images). _**Do not** use Raspbian or DietPi or any other flavor Linux OS._
-
-2. Flash the image onto an SSD/NVMe drive.
-
-3. Re-mount the drive to your workstation and then do the following (per the [official documentation](https://raspi.debian.net/defaults-and-settings)):
-
-    ```txt
-    Open 'sysconf.txt' in a text editor and save it upon updating the information below
-    - Change 'root_authorized_key' to your desired public SSH key
-    - Change 'root_pw' to your desired root password
-    - Change 'hostname' to your desired hostname
-    ```
-
-4. Connect SSD/NVMe drive to the Raspberry Pi 4 and power it on.
+1. Head over to <https://factory.talos.dev> and follow the instructions which will eventually lead you to download a Talos Linux iso file (or for SBCs the `.raw.xz`). Make sure to copy/paste the schematic ID; you will need it later on.
 
-5. [Post install] SSH into the device with the `root` user and then create a normal user account with `adduser ${username}`
+2. Flash the iso or raw file to a USB drive and boot to Talos on your nodes with it.
 
-6. [Post install] Follow steps 3 and 4 from [k3s (AMD64)](##k3s-amd64).
-
-7. [Post install] Install `python3` which is needed by Ansible.
-
-    ```sh
-    sudo apt install -y python3
-    ```
-
-8. Continue on to 🚀 [**Getting Started**](#-getting-started)
-
+3. Continue on to 🚀 [**Getting Started**](#-getting-started) ## 🚀 Getting Started @@ -188,13 +99,14 @@ You have two different options for setting up your local workstation. ``` 📍 _**Verify** that `direnv` is setup properly by opening a new terminal and `cd`ing into your repository. You should see something like:_ + ```sh cd /path/to/repo direnv: loading /path/to/repo/.envrc direnv: export +ANSIBLE_COLLECTIONS_PATH ... +VIRTUAL_ENV ~PATH ``` -6. Install the additional **required** CLI tools +4. Install the additional **required** CLI tools 📍 _**Not using Homebrew or ArchLinux?** Try using the generic Linux task below, if that fails check out the [Brewfile](.taskfiles/Workstation/Brewfile)/[Archfile](.taskfiles/Workstation/Archfile) for what CLI tools needed and install them._ @@ -207,7 +119,7 @@ You have two different options for setting up your local workstation. task workstation:generic-linux ``` -7. Setup a Python virual environment by running the following task command. +5. Setup a Python virual environment by running the following task command. 📍 _This commands requires Python 3.11+ to be installed._ @@ -215,7 +127,7 @@ You have two different options for setting up your local workstation. task workstation:venv ``` -8. Continue on to 🔧 [**Stage 3**](#-stage-3-bootstrap-configuration) +6. Continue on to 🔧 [**Stage 3**](#-stage-3-bootstrap-configuration) ### 🔧 Stage 3: Bootstrap configuration @@ -246,43 +158,7 @@ You have two different options for setting up your local workstation. git push ``` -5. Continue on to ⚡ [**Stage 4**](#-stage-4-prepare-your-nodes-for-kubernetes) - -### ⚡ Stage 4: Prepare your nodes for Kubernetes - -> [!NOTE] -> For **Talos** skip ahead to ⛵ [**Stage 5**](#-stage-5-install-kubernetes) - -#### k3s - -📍 _Here we will be running an Ansible playbook to prepare your nodes for running a Kubernetes cluster._ - -1. Ensure you are able to SSH into your nodes from your workstation using a private SSH key **without a passphrase** (for example using a SSH agent). This lets Ansible interact with your nodes. - -3. Install the Ansible dependencies - - ```sh - task ansible:deps - ``` - -4. Verify Ansible can view your config and ping your nodes - - ```sh - task ansible:list - task ansible:ping - ``` - -5. Run the Ansible prepare playbook (nodes wil reboot when done) - - ```sh - task ansible:run playbook=cluster-prepare - ``` - -6. Continue on to ⛵ [**Stage 5**](#-stage-5-install-kubernetes) - -### ⛵ Stage 5: Install Kubernetes - -#### Talos +### ⛵ Stage 4: Install Kubernetes 1. Deploy your cluster and bootstrap it. This generates secrets, generates the config files for your nodes and applies them. It bootstraps the cluster afterwards, fetches the kubeconfig file and installs Cilium and kubelet-csr-approver. It finishes with some health checks. @@ -292,14 +168,6 @@ You have two different options for setting up your local workstation. 2. ⚠️ It might take a while for the cluster to be setup (10+ minutes is normal), during which time you will see a variety of error messages like: "couldn't get current server API group list," "error: no matching resources found", etc. This is a normal. If this step gets interrupted, e.g. by pressing Ctrl + C, you likely will need to [nuke the cluster](#-Nuke) before trying again. -#### k3s - -1. Install Kubernetes depending on the distribution you chose - - ```sh - task ansible:run playbook=cluster-installation - ``` - #### Cluster validation 1. 
The `kubeconfig` for interacting with your cluster should have been created in the root of your repository.
@@ -324,8 +192,8 @@
    ```sh
    flux check --pre
    # ► checking prerequisites
-   # ✔ kubectl 1.27.3 >=1.18.0-0
-   # ✔ Kubernetes 1.27.3+k3s1 >=1.16.0-0
+   # ✔ kubectl 1.30.1 >=1.18.0-0
+   # ✔ Kubernetes 1.30.1 >=1.16.0-0
    # ✔ prerequisites checks passed
    ```
@@ -424,15 +292,10 @@ By default Flux will periodically check your git repository for changes. In orde
 
 ## 💥 Nuke
 
-There might be a situation where you want to destroy your Kubernetes cluster. This will completely clean the OS of all traces of the Kubernetes distribution you chose and then reboot the nodes.
+There might be a situation where you want to destroy your Kubernetes cluster. The following command will reset your nodes back to maintenance mode; append `--force` to completely format the Talos installation. Either way the nodes should reboot after the command has run.
 
 ```sh
-# k3s: Remove all traces of k3s from the nodes
-task ansible:run playbook=cluster-nuke
-# Talos: Reset your nodes back to maintenance mode and reboot
-task talos:soft-nuke
-# Talos: Comletely format your the Talos installation and reboot
-task talos:hard-nuke
+task talos:nuke
 ```
 
 ## 🤖 Renovate
diff --git a/Taskfile.yaml b/Taskfile.yaml
index c504e74fb3e..cecceffac53 100644
--- a/Taskfile.yaml
+++ b/Taskfile.yaml
@@ -4,7 +4,6 @@ version: "3"
 
 vars:
   # Directories
-  ANSIBLE_DIR: "{{.ROOT_DIR}}/ansible"
   BOOTSTRAP_DIR: "{{.ROOT_DIR}}/bootstrap"
   KUBERNETES_DIR: "{{.ROOT_DIR}}/kubernetes"
   PRIVATE_DIR: "{{.ROOT_DIR}}/.private"
@@ -25,7 +24,6 @@ env:
   VIRTUAL_ENV: "{{.ROOT_DIR}}/.venv"
 
 includes:
-  ansible: .taskfiles/Ansible/Taskfile.yaml
   kubernetes:
     aliases: ["k8s"]
     taskfile: .taskfiles/Kubernetes/Taskfile.yaml
@@ -51,13 +49,12 @@ tasks:
       - cmd: echo === Configuration file copied ===
       - cmd: echo Proceed with updating the configuration files...
       - cmd: echo {{.BOOTSTRAP_CONFIG_FILE}}
-    status:
-      - test -f "{{.BOOTSTRAP_CONFIG_FILE}}"
+    status: ["test -f {{.BOOTSTRAP_CONFIG_FILE}}"]
     silent: true
 
   configure:
     desc: Configure repository from bootstrap vars
-    prompt: Any conflicting config in the root kubernetes and ansible directories will be overwritten... continue?
+    prompt: Any conflicting config in the kubernetes directory will be overwritten... continue?
     deps: ["workstation:direnv", "workstation:venv", "sops:age-keygen", "init"]
     cmds:
      - task: .template
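With the Ansible reset step gone, templating the repository is driven entirely by the `init` and `configure` tasks shown above. A sketch of the usual flow, assuming the default task names (the bootstrap config file is whatever `BOOTSTRAP_CONFIG_FILE` points at):

```sh
task init        # copy the sample bootstrap config into place; skipped if it already exists
# edit the bootstrap config file with your node inventory, networks, and Cloudflare settings
task configure   # re-render the kubernetes/ directory from the bootstrap vars
```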
diff --git a/bootstrap/overrides/readme.partial.yaml.j2 b/bootstrap/overrides/readme.partial.yaml.j2
index 36dac44d333..b73f7538b83 100644
--- a/bootstrap/overrides/readme.partial.yaml.j2
+++ b/bootstrap/overrides/readme.partial.yaml.j2
@@ -1,5 +1,5 @@
-<% Place user jinja template overrides in this file's directory %>
-<% Docs: https://mirkolenz.github.io/makejinja/makejinja.html %>
-<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/makejinja.toml %>
-<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/input1/not-empty.yaml.jinja %>
-<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/input2/not-empty.yaml.jinja %>
+#| Place user jinja template overrides in this file's directory |#
+#| Docs: https://mirkolenz.github.io/makejinja/makejinja.html |#
+#| Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/makejinja.toml |#
+#| Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/input1/not-empty.yaml.jinja |#
+#| Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/input2/not-empty.yaml.jinja |#
diff --git a/bootstrap/scripts/validation.py b/bootstrap/scripts/validation.py
index f0bd685ac42..b3a75a07ce6 100644
--- a/bootstrap/scripts/validation.py
+++ b/bootstrap/scripts/validation.py
@@ -7,9 +7,7 @@
 import socket
 import sys
 
-DISTRIBUTIONS = ["k3s", "talos"]
-GLOBAL_CLI_TOOLS = ["age", "flux", "helmfile", "sops", "jq", "kubeconform", "kustomize"]
-TALOS_CLI_TOOLS = ["talosctl", "talhelper"]
+GLOBAL_CLI_TOOLS = ["age", "flux", "helmfile", "sops", "jq", "kubeconform", "kustomize", "talosctl", "talhelper"]
 CLOUDFLARE_TOOLS = ["cloudflared"]
 
@@ -51,67 +49,46 @@ def validate_network(cidr: str, family: int) -> str:
     return cidr
 
-def validate_node(node: dict, node_cidr: str, distribution: str) -> None:
+def validate_node(node: dict, node_cidr: str) -> None:
     if not node.get("name"):
         raise ValueError(f"A node is missing a name")
-    if not re.match(r"^[a-z0-9-\.]+$", node.get('name')):
+    if not re.match(r"^[a-z0-9-]+$", node.get('name')):
         raise ValueError(f"Node {node.get('name')} has an invalid name")
-    if distribution in ["k3s"]:
-        if not node.get("ssh_user") :
-            raise ValueError(f"Node {node.get('name')} is missing ssh_user")
-    if distribution in ["talos"]:
-        if not node.get("talos_disk"):
-            raise ValueError(f"Node {node.get('name')} is missing talos_disk")
-        if not node.get("talos_nic"):
-            raise ValueError(f"Node {node.get('name')} is missing talos_nic")
-        if not re.match(r"(?:[0-9a-fA-F]:?){12}", node.get("talos_nic")):
-            raise ValueError(f"Node {node.get('name')} has an invalid talos_nic, is this a MAC address?")
-    ip = validate_ip(node.get("address"))
-    if netaddr.IPAddress(ip, 4) not in netaddr.IPNetwork(node_cidr):
-        raise ValueError(f"Node {node.get('name')} is not in the node CIDR {node_cidr}")
-    port = 50000 if distribution in ["talos"] else 22
-    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
-        sock.settimeout(5)
-        result = sock.connect_ex((ip, port))
-        if result != 0:
-            raise ValueError(f"Node {node.get('name')} port {port} is not open")
-
-
-@required("bootstrap_distribution", "bootstrap_cloudflare")
-def validate_cli_tools(distribution: str, cloudflare: dict, **_) -> None:
-    if distribution not in DISTRIBUTIONS:
-        raise ValueError(f"Invalid distribution {distribution}")
+    if not node.get("disk"):
+        raise ValueError(f"Node {node.get('name')} is missing disk")
+    if
not node.get("mac_addr"): + raise ValueError(f"Node {node.get('name')} is missing mac_addr") + if not re.match(r"(?:[0-9a-fA-F]:?){12}", node.get("mac_addr")): + raise ValueError(f"Node {node.get('name')} has an invalid mac_addr, is this a MAC address?") + if node.get("address"): + ip = validate_ip(node.get("address")) + if netaddr.IPAddress(ip, 4) not in netaddr.IPNetwork(node_cidr): + raise ValueError(f"Node {node.get('name')} is not in the node CIDR {node_cidr}") + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + sock.settimeout(5) + result = sock.connect_ex((ip, 50000)) + if result != 0: + raise ValueError(f"Node {node.get('name')} port 50000 is not open") + + +@required("bootstrap_cloudflare") +def validate_cli_tools(cloudflare: dict, **_) -> None: for tool in GLOBAL_CLI_TOOLS: if not which(tool): raise ValueError(f"Missing required CLI tool {tool}") - for tool in TALOS_CLI_TOOLS if distribution in ["talos"] else []: - if not which(tool): - raise ValueError(f"Missing required CLI tool {tool}") for tool in CLOUDFLARE_TOOLS if cloudflare.get("enabled", False) else []: if not which(tool): raise ValueError(f"Missing required CLI tool {tool}") -@required("bootstrap_distribution") -def validate_distribution(distribution: str, **_) -> None: - if distribution not in DISTRIBUTIONS: - raise ValueError(f"Invalid distribution {distribution}") - - -@required("bootstrap_timezone") -def validate_timezone(timezone: str, **_) -> None: - if timezone not in available_timezones(): - raise ValueError(f"Invalid timezone {timezone}") - - @required("bootstrap_sops_age_pubkey") def validate_age(key: str, **_) -> None: if not re.match(r"^age1[a-z0-9]{0,58}$", key): raise ValueError(f"Invalid Age public key {key}") -@required("bootstrap_node_network", "bootstrap_node_inventory", "bootstrap_distribution") -def validate_nodes(node_cidr: str, nodes: dict[list], distribution: str, **_) -> None: +@required("bootstrap_node_network", "bootstrap_node_inventory") +def validate_nodes(node_cidr: str, nodes: dict[list], **_) -> None: node_cidr = validate_network(node_cidr, 4) controllers = [node for node in nodes if node.get('controller') == True] @@ -120,18 +97,16 @@ def validate_nodes(node_cidr: str, nodes: dict[list], distribution: str, **_) -> if len(controllers) % 2 == 0: raise ValueError(f"Must have an odd number of controller nodes") for node in controllers: - validate_node(node, node_cidr, distribution) + validate_node(node, node_cidr) workers = [node for node in nodes if node.get('controller') == False] for node in workers: - validate_node(node, node_cidr, distribution) + validate_node(node, node_cidr) def validate(data: dict) -> None: validate_python_version() validate_cli_tools(data) - validate_distribution(data) - validate_timezone(data) validate_age(data) if not data.get("skip_tests", False): diff --git a/bootstrap/templates/.sops.yaml.j2 b/bootstrap/templates/.sops.yaml.j2 index 4cec526149f..cb7aa76495c 100644 --- a/bootstrap/templates/.sops.yaml.j2 +++ b/bootstrap/templates/.sops.yaml.j2 @@ -1,20 +1,12 @@ --- creation_rules: - #% if bootstrap_distribution in ["talos"] %# - # IMPORTANT: This rule MUST be above the others path_regex: talos/.*\.sops\.ya?ml key_groups: - age: - "#{ bootstrap_sops_age_pubkey }#" - #% endif %# - path_regex: kubernetes/.*\.sops\.ya?ml encrypted_regex: "^(data|stringData)$" key_groups: - age: - "#{ bootstrap_sops_age_pubkey }#" - #% if bootstrap_distribution in ["k3s"] %# - - path_regex: ansible/.*\.sops\.ya?ml - key_groups: - - age: - - "#{ 
bootstrap_sops_age_pubkey }#" - #% endif %# diff --git a/bootstrap/templates/ansible/.ansible-lint.j2 b/bootstrap/templates/ansible/.ansible-lint.j2 deleted file mode 100644 index 36f6b441462..00000000000 --- a/bootstrap/templates/ansible/.ansible-lint.j2 +++ /dev/null @@ -1,9 +0,0 @@ -skip_list: - - yaml[commas] - - yaml[line-length] - - var-naming -warn_list: - - command-instead-of-shell - - deprecated-command-syntax - - experimental - - no-changed-when diff --git a/bootstrap/templates/ansible/.mjfilter.py b/bootstrap/templates/ansible/.mjfilter.py deleted file mode 100644 index 0979f9a644c..00000000000 --- a/bootstrap/templates/ansible/.mjfilter.py +++ /dev/null @@ -1 +0,0 @@ -main = lambda data: data.get("bootstrap_distribution", "k3s") in ["k3s"] diff --git a/bootstrap/templates/ansible/inventory/group_vars/controllers/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/controllers/main.yaml.j2 deleted file mode 100644 index 875c4c615bc..00000000000 --- a/bootstrap/templates/ansible/inventory/group_vars/controllers/main.yaml.j2 +++ /dev/null @@ -1,36 +0,0 @@ ---- -k3s_control_node: true -k3s_server: - #% if bootstrap_feature_gates.dual_stack_ipv4_first %# - cluster-cidr: "#{ bootstrap_pod_network.split(',')[0] }#,#{ bootstrap_pod_network.split(',')[1] }#" - service-cidr: "#{ bootstrap_service_network.split(',')[0] }#,#{ bootstrap_service_network.split(',')[1] }#" - #% else %# - cluster-cidr: "#{ bootstrap_pod_network }#" - service-cidr: "#{ bootstrap_service_network }#" - #% endif %# - disable: ["flannel", "local-storage", "metrics-server", "servicelb", "traefik"] - disable-cloud-controller: true - disable-kube-proxy: true - disable-network-policy: true - docker: false - embedded-registry: true - etcd-expose-metrics: true - flannel-backend: "none" - kube-apiserver-arg: - - "anonymous-auth=true" - kube-controller-manager-arg: - - "bind-address=0.0.0.0" - kube-scheduler-arg: - - "bind-address=0.0.0.0" - #% if bootstrap_feature_gates.dual_stack_ipv4_first %# - node-ip: "{{ ansible_host }},{{ ansible_default_ipv6.address }}" - #% else %# - node-ip: "{{ ansible_host }}" - #% endif %# - secrets-encryption: true - tls-san: - - "#{ bootstrap_controllers_vip }#" - #% for item in bootstrap_tls_sans %# - - "#{ item }#" - #% endfor %# - write-kubeconfig-mode: "644" diff --git a/bootstrap/templates/ansible/inventory/group_vars/kubernetes/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/kubernetes/main.yaml.j2 deleted file mode 100644 index fcac87dc7ab..00000000000 --- a/bootstrap/templates/ansible/inventory/group_vars/kubernetes/main.yaml.j2 +++ /dev/null @@ -1,23 +0,0 @@ ---- -k3s_become: true -k3s_etcd_datastore: true -k3s_install_hard_links: true -k3s_registration_address: "#{ bootstrap_controllers_vip }#" -k3s_registries: - mirrors: - docker.io: - gcr.io: - ghcr.io: - k8s.gcr.io: - lscr.io: - mcr.microsoft.com: - public.ecr.aws: - quay.io: - registry.k8s.io: -# renovate: datasource=github-releases depName=k3s-io/k3s -k3s_release_version: v1.30.0+k3s1 -k3s_server_manifests_templates: - - custom-cilium-helmchart.yaml - - custom-kube-vip-ds.yaml - - custom-kube-vip-rbac.yaml -k3s_use_unsupported_config: true diff --git a/bootstrap/templates/ansible/inventory/group_vars/workers/.mjfilter.py b/bootstrap/templates/ansible/inventory/group_vars/workers/.mjfilter.py deleted file mode 100644 index 8fb17eac580..00000000000 --- a/bootstrap/templates/ansible/inventory/group_vars/workers/.mjfilter.py +++ /dev/null @@ -1,10 +0,0 @@ -main = lambda data: ( - 
data.get("bootstrap_distribution", "k3s") in ["k3s"] and - len( - list( - filter( - lambda item: "controller" in item and item["controller"] is False, data.get("bootstrap_node_inventory") - ) - ) - ) > 0 -) diff --git a/bootstrap/templates/ansible/inventory/group_vars/workers/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/workers/main.yaml.j2 deleted file mode 100644 index 61622c98b58..00000000000 --- a/bootstrap/templates/ansible/inventory/group_vars/workers/main.yaml.j2 +++ /dev/null @@ -1,8 +0,0 @@ ---- -k3s_control_node: false -k3s_agent: - #% if bootstrap_feature_gates.dual_stack_ipv4_first %# - node-ip: "{{ ansible_host }},{{ ansible_default_ipv6.address }}" - #% else %# - node-ip: "{{ ansible_host }}" - #% endif %# diff --git a/bootstrap/templates/ansible/inventory/hosts.yaml.j2 b/bootstrap/templates/ansible/inventory/hosts.yaml.j2 deleted file mode 100644 index 4df83a2804b..00000000000 --- a/bootstrap/templates/ansible/inventory/hosts.yaml.j2 +++ /dev/null @@ -1,29 +0,0 @@ ---- -kubernetes: - children: - controllers: - hosts: - #% for item in bootstrap_node_inventory %# - #% if item.controller %# - "#{ item.name }#": - ansible_user: "#{ item.ssh_user }#" - ansible_host: "#{ item.address }#" - #% if item.ssh_key %# - ansible_ssh_private_key_file: "#{ item.ssh_key }#" - #% endif %# - #% endif %# - #% endfor %# - #% if bootstrap_node_inventory | selectattr('controller', 'equalto', False) | list | length %# - workers: - hosts: - #% for item in bootstrap_node_inventory %# - #% if not item.controller %# - "#{ item.name }#": - ansible_user: "#{ item.ssh_user }#" - ansible_host: "#{ item.address }#" - #% if item.ssh_key %# - ansible_ssh_private_key_file: "#{ item.ssh_key }#" - #% endif %# - #% endif %# - #% endfor %# - #% endif %# diff --git a/bootstrap/templates/ansible/playbooks/cluster-installation.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-installation.yaml.j2 deleted file mode 100644 index 507b7b29521..00000000000 --- a/bootstrap/templates/ansible/playbooks/cluster-installation.yaml.j2 +++ /dev/null @@ -1,91 +0,0 @@ ---- -- name: Cluster Installation - hosts: kubernetes - become: true - gather_facts: true - any_errors_fatal: true - pre_tasks: - - name: Pausing for 5 seconds... 
- ansible.builtin.pause: - seconds: 5 - tasks: - - name: Check if cluster is installed - check_mode: false - ansible.builtin.stat: - path: /etc/rancher/k3s/config.yaml - register: k3s_installed - - - name: Ignore manifests templates and urls if the cluster is already installed - when: k3s_installed.stat.exists - ansible.builtin.set_fact: - k3s_server_manifests_templates: [] - k3s_server_manifests_urls: [] - - - name: Prevent downgrades - when: k3s_installed.stat.exists - ansible.builtin.include_tasks: tasks/version-check.yaml - - - name: Ensure that the /opt/cni directory exists - ansible.builtin.file: - path: /opt/cni - mode: '755' - state: directory - - name: Ensure that the /opt/cni/bin is a link to /var/lib/rancher/k3s/data/current/bin - ansible.builtin.file: - src: /var/lib/rancher/k3s/data/current/bin - dest: /opt/cni/bin - follow: false - force: true - state: link - - - name: Ensure that the /etc/cni directory exists - ansible.builtin.file: - path: /etc/cni - mode: '755' - state: directory - - name: Ensure that the /var/lib/rancher/k3s/agent/etc/cni/net.d directory exists - ansible.builtin.file: - path: /var/lib/rancher/k3s/agent/etc/cni/net.d - mode: '755' - state: directory - - name: Ensure that the /etc/cni/net.d is a link to /var/lib/rancher/k3s/agent/etc/cni/net.d - ansible.builtin.file: - src: /var/lib/rancher/k3s/agent/etc/cni/net.d - dest: /etc/cni/net.d - force: true - state: link - - - name: Install Kubernetes - ansible.builtin.include_role: - name: xanmanning.k3s - public: true - vars: - k3s_state: installed - - - name: Kubeconfig - ansible.builtin.include_tasks: tasks/kubeconfig.yaml - - - name: Wait for custom manifests to rollout - when: - - k3s_primary_control_node - - (k3s_server_manifests_templates | length > 0 - or k3s_server_manifests_urls | length > 0) - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - kind: "{{ item.kind }}" - name: "{{ item.name }}" - namespace: "{{ item.namespace | default('') }}" - wait: true - wait_sleep: 10 - wait_timeout: 360 - loop: - - { name: cilium, kind: HelmChart, namespace: kube-system } - - { name: kube-vip, kind: DaemonSet, namespace: kube-system } - - - name: Cilium - when: k3s_primary_control_node - ansible.builtin.include_tasks: tasks/cilium.yaml - - - name: Cruft - when: k3s_primary_control_node - ansible.builtin.include_tasks: tasks/cruft.yaml diff --git a/bootstrap/templates/ansible/playbooks/cluster-nuke.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-nuke.yaml.j2 deleted file mode 100644 index 415e98ed06b..00000000000 --- a/bootstrap/templates/ansible/playbooks/cluster-nuke.yaml.j2 +++ /dev/null @@ -1,105 +0,0 @@ ---- -- name: Cluster Nuke - hosts: kubernetes - become: true - gather_facts: true - any_errors_fatal: true - vars_prompt: - - name: nuke - prompt: |- - Are you sure you want to nuke this cluster? - Type 'YES I WANT TO DESTROY THIS CLUSTER' to proceed - default: "n" - private: false - pre_tasks: - - name: Check for confirmation - ansible.builtin.fail: - msg: Aborted nuking the cluster - when: nuke != 'YES I WANT TO DESTROY THIS CLUSTER' - - - name: Pausing for 5 seconds... 
- ansible.builtin.pause: - seconds: 5 - tasks: - - name: Stop Kubernetes # noqa: ignore-errors - ignore_errors: true - block: - - name: Stop Kubernetes - ansible.builtin.include_role: - name: xanmanning.k3s - public: true - vars: - k3s_state: stopped - - # https://github.com/k3s-io/docs/blob/main/docs/installation/network-options.md - - name: Networking - block: - - name: Networking | Delete Cilium links - ansible.builtin.command: - cmd: "ip link delete {{ item }}" - removes: "/sys/class/net/{{ item }}" - loop: ["cilium_host", "cilium_net", "cilium_vxlan"] - - name: Networking | Flush iptables - ansible.builtin.iptables: - table: "{{ item }}" - flush: true - loop: ["filter", "nat", "mangle", "raw"] - - name: Networking | Flush ip6tables - ansible.builtin.iptables: - table: "{{ item }}" - flush: true - ip_version: ipv6 - loop: ["filter", "nat", "mangle", "raw"] - - name: Networking | Delete CNI bin link - ansible.builtin.file: - path: /opt/cni/bin - state: absent - - name: Networking | Delete CNI conf link - ansible.builtin.file: - path: /etc/cni/net.d - state: absent - - - name: Check to see if k3s-killall.sh exits - ansible.builtin.stat: - path: /usr/local/bin/k3s-killall.sh - register: check_k3s_killall_script - - - name: Check to see if k3s-uninstall.sh exits - ansible.builtin.stat: - path: /usr/local/bin/k3s-uninstall.sh - register: check_k3s_uninstall_script - - - name: Run k3s-killall.sh - when: check_k3s_killall_script.stat.exists - ansible.builtin.command: - cmd: /usr/local/bin/k3s-killall.sh - register: k3s_killall - changed_when: k3s_killall.rc == 0 - - - name: Run k3s-uninstall.sh - when: check_k3s_uninstall_script.stat.exists - ansible.builtin.command: - cmd: /usr/local/bin/k3s-uninstall.sh - args: - removes: /usr/local/bin/k3s-uninstall.sh - register: k3s_uninstall - changed_when: k3s_uninstall.rc == 0 - - - name: Ensure hard links are removed - when: - - k3s_install_hard_links - - not ansible_check_mode - ansible.builtin.file: - path: "{{ k3s_install_dir }}/{{ item }}" - state: absent - loop: ["kubectl", "crictl", "ctr"] - - - name: Remove local storage path - ansible.builtin.file: - path: /var/openebs/local - state: absent - - - name: Reboot - ansible.builtin.reboot: - msg: Rebooting hosts - reboot_timeout: 3600 diff --git a/bootstrap/templates/ansible/playbooks/cluster-prepare.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-prepare.yaml.j2 deleted file mode 100644 index 126138c4f6a..00000000000 --- a/bootstrap/templates/ansible/playbooks/cluster-prepare.yaml.j2 +++ /dev/null @@ -1,115 +0,0 @@ ---- -- name: Prepare System - hosts: kubernetes - become: true - gather_facts: true - any_errors_fatal: true - pre_tasks: - - name: Pausing for 5 seconds... 
- ansible.builtin.pause: - seconds: 5 - - name: Populate service facts - ansible.builtin.service_facts: - tasks: - - name: Locale - block: - - name: Locale | Set timezone - community.general.timezone: - name: "#{ bootstrap_timezone }#" - - - name: Packages - block: - - name: Packages | Install - ansible.builtin.apt: - name: apt-transport-https,ca-certificates,conntrack,curl,dirmngr,gdisk,gnupg,hdparm,htop, - iptables,iputils-ping,ipvsadm,libseccomp2,lm-sensors,net-tools,nfs-common, - nvme-cli,open-iscsi,parted,psmisc,python3,python3-apt,python3-kubernetes,python3-yaml, - smartmontools,socat,software-properties-common,unzip,util-linux - install_recommends: false - - - name: Network Configuration - notify: Reboot - block: - - name: Network Configuration | Set hostname - ansible.builtin.hostname: - name: "{{ inventory_hostname }}" - - name: Network Configuration | Update hosts - ansible.builtin.copy: - content: | - 127.0.0.1 localhost - 127.0.1.1 {{ inventory_hostname }} - - # The following lines are desirable for IPv6 capable hosts - ::1 localhost ip6-localhost ip6-loopback - ff02::1 ip6-allnodes - ff02::2 ip6-allrouters - dest: /etc/hosts - mode: preserve - # https://github.com/onedr0p/cluster-template/discussions/635 - - name: Network Configuration | Remove immutable flag from /etc/resolv.conf - ansible.builtin.file: - attributes: -i - path: /etc/resolv.conf - - name: Network Configuration | Remove /etc/resolv.conf - ansible.builtin.file: - attributes: -i - path: /etc/resolv.conf - state: absent - - name: Network Configuration | Add custom /etc/resolv.conf - ansible.builtin.copy: - attributes: +i - mode: '0644' - dest: /etc/resolv.conf - content: | - search #{ bootstrap_search_domain|default('.', true) }# - #% for item in bootstrap_dns_servers | default(['1.1.1.1', '1.0.0.1']) %# - nameserver #{ item }# - #% endfor %# - - - name: System Configuration - notify: Reboot - block: - - name: System Configuration | Disable apparmor - when: ansible_facts.services['apparmor.service'] is defined - ansible.builtin.systemd: - name: apparmor - state: stopped - masked: true - - name: System Configuration | Disable swap - ansible.posix.mount: - name: "{{ item }}" - fstype: swap - state: absent - loop: ["none", "swap"] - - name: System Configuration | Create Kernel modules - ansible.builtin.copy: - dest: "/etc/modules-load.d/{{ item }}.conf" - mode: "0644" - content: "{{ item }}" - loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "iptable_mangle", "iptable_raw", "nbd", "overlay", "rbd", "xt_socket"] - register: modules_status - - name: System Configuration | Reload Kernel modules # noqa: no-changed-when no-handler - when: modules_status.changed - ansible.builtin.systemd: - name: systemd-modules-load - state: restarted - - name: System Configuration | Sysctl - ansible.posix.sysctl: - name: "{{ item.key }}" - value: "{{ item.value }}" - sysctl_file: /etc/sysctl.d/99-kubernetes.conf - reload: true - with_dict: "{{ sysctl_config }}" - vars: - sysctl_config: - fs.inotify.max_queued_events: 65536 - fs.inotify.max_user_watches: 524288 - fs.inotify.max_user_instances: 8192 - net.core.rmem_max: 2500000 - net.core.wmem_max: 2500000 - - handlers: - - name: Reboot - ansible.builtin.reboot: - msg: Rebooting hosts - reboot_timeout: 3600 diff --git a/bootstrap/templates/ansible/playbooks/cluster-reboot.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-reboot.yaml.j2 deleted file mode 100644 index 6fe1fd0df04..00000000000 --- a/bootstrap/templates/ansible/playbooks/cluster-reboot.yaml.j2 +++ /dev/null @@ -1,15 
+0,0 @@ ---- -- name: Reboot - hosts: kubernetes - become: true - gather_facts: true - any_errors_fatal: true - pre_tasks: - - name: Pausing for 5 seconds... - ansible.builtin.pause: - seconds: 5 - tasks: - - name: Reboot - ansible.builtin.reboot: - msg: Rebooting hosts - reboot_timeout: 3600 diff --git a/bootstrap/templates/ansible/playbooks/cluster-rollout-update.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-rollout-update.yaml.j2 deleted file mode 100644 index acad8fd600a..00000000000 --- a/bootstrap/templates/ansible/playbooks/cluster-rollout-update.yaml.j2 +++ /dev/null @@ -1,70 +0,0 @@ ---- -- name: Cluster rollout update - hosts: kubernetes - become: true - gather_facts: true - any_errors_fatal: true - serial: 1 - pre_tasks: - - name: Pausing for 5 seconds... - ansible.builtin.pause: - seconds: 5 - tasks: - - name: Details - ansible.builtin.command: "k3s kubectl get node {{ inventory_hostname }} -o json" - register: kubectl_get_node - delegate_to: "{{ groups['controllers'][0] }}" - failed_when: false - changed_when: false - - - name: Update - when: - # When status.conditions[x].type == Ready then check stats.conditions[x].status for True|False - - kubectl_get_node['stdout'] | from_json | json_query("status.conditions[?type == 'Ready'].status") - # If spec.unschedulable is defined then the node is cordoned - - not (kubectl_get_node['stdout'] | from_json).spec.unschedulable is defined - block: - - name: Cordon - kubernetes.core.k8s_drain: - name: "{{ inventory_hostname }}" - kubeconfig: /etc/rancher/k3s/k3s.yaml - state: cordon - delegate_to: "{{ groups['controllers'][0] }}" - - - name: Drain - kubernetes.core.k8s_drain: - name: "{{ inventory_hostname }}" - kubeconfig: /etc/rancher/k3s/k3s.yaml - state: drain - delete_options: - delete_emptydir_data: true - ignore_daemonsets: true - terminate_grace_period: 600 - wait_timeout: 900 - pod_selectors: - - app!=rook-ceph-osd # Rook Ceph - delegate_to: "{{ groups['controllers'][0] }}" - - - name: Update - ansible.builtin.apt: - upgrade: dist - update_cache: true - - - name: Check if reboot is required - ansible.builtin.stat: - path: /var/run/reboot-required - register: reboot_required - - - name: Reboot - when: reboot_required.stat.exists - ansible.builtin.reboot: - msg: Rebooting node - post_reboot_delay: 60 - reboot_timeout: 3600 - - - name: Uncordon - kubernetes.core.k8s_drain: - name: "{{ inventory_hostname }}" - kubeconfig: /etc/rancher/k3s/k3s.yaml - state: uncordon - delegate_to: "{{ groups['controllers'][0] }}" diff --git a/bootstrap/templates/ansible/playbooks/tasks/cilium.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/cilium.yaml.j2 deleted file mode 100644 index ca242bb031b..00000000000 --- a/bootstrap/templates/ansible/playbooks/tasks/cilium.yaml.j2 +++ /dev/null @@ -1,56 +0,0 @@ ---- -- name: Cilium - block: - - name: Cilium | Check if Cilium HelmChart exists - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: cilium - kind: HelmChart - namespace: kube-system - register: cilium_helmchart - - - name: Cilium | Wait for Cilium to rollout - when: cilium_helmchart.resources | count > 0 - kubernetes.core.k8s_info: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: helm-install-cilium - kind: Job - namespace: kube-system - wait: true - wait_condition: - type: Complete - status: true - wait_timeout: 360 - - - name: Cilium | Patch the Cilium HelmChart to unmanage it - when: cilium_helmchart.resources | count > 0 - kubernetes.core.k8s_json_patch: - kubeconfig: /etc/rancher/k3s/k3s.yaml - name: cilium 
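-        # "~1" in the patch path below is JSON Pointer escaping (RFC 6901): it
-        # decodes to "/", so the patched key is the
-        # "helmcharts.helm.cattle.io/unmanaged" annotation, which tells the k3s
-        # Helm controller to stop reconciling the chart before the CR is deleted.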
-        kind: HelmChart
-        namespace: kube-system
-      patch:
-        - op: add
-          path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged
-          value: "true"
-
-    - name: Cilium | Delete the Cilium HelmChart CR
-      when: cilium_helmchart.resources | count > 0
-      kubernetes.core.k8s:
-        kubeconfig: /etc/rancher/k3s/k3s.yaml
-        name: cilium
-        kind: HelmChart
-        namespace: kube-system
-        state: absent
-
-    - name: Cilium | Force delete the Cilium HelmChart
-      when: cilium_helmchart.resources | count > 0
-      kubernetes.core.k8s:
-        kubeconfig: /etc/rancher/k3s/k3s.yaml
-        name: cilium
-        kind: HelmChart
-        namespace: kube-system
-        state: patched
-        definition:
-          metadata:
-            finalizers: []
diff --git a/bootstrap/templates/ansible/playbooks/tasks/cruft.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/cruft.yaml.j2
deleted file mode 100644
index 73697476360..00000000000
--- a/bootstrap/templates/ansible/playbooks/tasks/cruft.yaml.j2
+++ /dev/null
@@ -1,31 +0,0 @@
----
-- name: Cruft
-  block:
-    - name: Cruft | Get list of custom manifests
-      ansible.builtin.find:
-        paths: "{{ k3s_server_manifests_dir }}"
-        file_type: file
-        use_regex: true
-        patterns: ["^custom-.*"]
-      register: custom_manifest
-
-    - name: Cruft | Delete custom manifests
-      ansible.builtin.file:
-        path: "{{ item.path }}"
-        state: absent
-      loop: "{{ custom_manifest.files }}"
-
-    - name: Cruft | Get list of custom addons
-      kubernetes.core.k8s_info:
-        kubeconfig: /etc/rancher/k3s/k3s.yaml
-        kind: Addon
-      register: addons_list
-
-    - name: Cruft | Delete addons
-      kubernetes.core.k8s:
-        kubeconfig: /etc/rancher/k3s/k3s.yaml
-        name: "{{ item.metadata.name }}"
-        kind: Addon
-        namespace: kube-system
-        state: absent
-      loop: "{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}"
diff --git a/bootstrap/templates/ansible/playbooks/tasks/kubeconfig.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/kubeconfig.yaml.j2
deleted file mode 100644
index 56bf684e595..00000000000
--- a/bootstrap/templates/ansible/playbooks/tasks/kubeconfig.yaml.j2
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Get absolute path to this Git repository # noqa: command-instead-of-module
-  ansible.builtin.command: git rev-parse --show-toplevel
-  delegate_to: localhost
-  become: false
-  run_once: true
-  register: repository_path
-  changed_when: false
-  check_mode: false
-  failed_when: repository_path.rc != 0
-
-- name: Copy kubeconfig to the project directory
-  when: k3s_primary_control_node
-  ansible.builtin.fetch:
-    src: /etc/rancher/k3s/k3s.yaml
-    dest: "{{ repository_path.stdout }}/kubeconfig"
-    flat: true
-
-- name: Update kubeconfig with the correct load balancer address
-  delegate_to: localhost
-  become: false
-  run_once: true
-  ansible.builtin.replace:
-    path: "{{ repository_path.stdout }}/kubeconfig"
-    regexp: https://127.0.0.1:6443
-    replace: "https://{{ k3s_registration_address }}:6443"
diff --git a/bootstrap/templates/ansible/playbooks/tasks/version-check.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/version-check.yaml.j2
deleted file mode 100644
index 56e56702622..00000000000
--- a/bootstrap/templates/ansible/playbooks/tasks/version-check.yaml.j2
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Version Check
-  block:
-    - name: Get deployed k3s version
-      ansible.builtin.command: k3s --version
-      register: k3s_version
-      changed_when: false
-      failed_when: false
-
-    - name: Extract k3s version
-      ansible.builtin.set_fact:
-        current_k3s_version: "{{ k3s_version.stdout | regex_replace('(?im)k3s version (?P<version>[a-z0-9\\.\\+]+).*\n.*', '\\g<version>') }}"
-
-    - name:
Check if upgrades are allowed - ansible.builtin.assert: - that: ["k3s_release_version is version(current_k3s_version, '>=')"] - fail_msg: "Unable to upgrade k3s because the deployed version is higher than the one specified in the configuration" diff --git a/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 b/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 deleted file mode 100644 index 3054a59faf3..00000000000 --- a/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: helm.cattle.io/v1 -kind: HelmChart -metadata: - name: cilium - namespace: kube-system -spec: - repo: https://helm.cilium.io/ - chart: cilium - # renovate: datasource=helm depName=cilium repository=https://helm.cilium.io - version: 1.15.5 - targetNamespace: kube-system - bootstrap: true - valuesContent: |- - #% filter indent(width=4, first=True) %# - #% include 'partials/cilium-values-init.partial.yaml.j2' %# - #% endfilter %# diff --git a/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-ds.yaml.j2 b/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-ds.yaml.j2 deleted file mode 100644 index f62cab4d99f..00000000000 --- a/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-ds.yaml.j2 +++ /dev/null @@ -1,2 +0,0 @@ ---- -#% include 'partials/kube-vip-ds.partial.yaml.j2' %# diff --git a/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-rbac.yaml.j2 b/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-rbac.yaml.j2 deleted file mode 100644 index 481c2e822c8..00000000000 --- a/bootstrap/templates/ansible/playbooks/templates/custom-kube-vip-rbac.yaml.j2 +++ /dev/null @@ -1,2 +0,0 @@ ---- -#% include 'partials/kube-vip-rbac.partial.yaml.j2' %# diff --git a/bootstrap/templates/ansible/requirements.txt.j2 b/bootstrap/templates/ansible/requirements.txt.j2 deleted file mode 100644 index ef5a6fc3cfd..00000000000 --- a/bootstrap/templates/ansible/requirements.txt.j2 +++ /dev/null @@ -1,4 +0,0 @@ -ansible-lint==24.5.0 -ansible==9.5.1 -jmespath==1.0.1 -openshift==0.13.2 diff --git a/bootstrap/templates/ansible/requirements.yaml.j2 b/bootstrap/templates/ansible/requirements.yaml.j2 deleted file mode 100644 index 91c6e544956..00000000000 --- a/bootstrap/templates/ansible/requirements.yaml.j2 +++ /dev/null @@ -1,14 +0,0 @@ ---- -collections: - - name: ansible.posix - version: 1.5.4 - - name: ansible.utils - version: 4.1.0 - - name: community.general - version: 8.6.0 - - name: kubernetes.core - version: 3.1.0 -roles: - - name: xanmanning.k3s - src: https://github.com/PyratLabs/ansible-role-k3s - version: v3.4.4 diff --git a/bootstrap/templates/partials/cilium-values-init.partial.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helm-values.yaml.j2 similarity index 72% rename from bootstrap/templates/partials/cilium-values-init.partial.yaml.j2 rename to bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helm-values.yaml.j2 index b1b0c3531c6..a8c7912f837 100644 --- a/bootstrap/templates/partials/cilium-values-init.partial.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helm-values.yaml.j2 @@ -1,3 +1,4 @@ +--- autoDirectNodeRoutes: true bgpControlPlane: enabled: true @@ -12,15 +13,8 @@ cluster: name: #{ bootstrap_cluster_name|default('home-kubernetes', true) }# cni: exclusive: false - #% if bootstrap_distribution in ["k3s"] %# - binPath: /var/lib/rancher/k3s/data/current/bin - confPath: 
/var/lib/rancher/k3s/agent/etc/cni/net.d - #% endif %# containerRuntime: integration: containerd - #% if bootstrap_distribution in ["k3s"] %# - socketPath: /var/run/k3s/containerd/containerd.sock - #% endif %# # NOTE: devices might need to be set if you have more than one active NIC on your hosts # devices: eno+ eth+ endpointRoutes: @@ -29,19 +23,14 @@ hubble: enabled: false ipam: mode: kubernetes -ipv4NativeRoutingCIDR: "#{ bootstrap_pod_network }#" +ipv4NativeRoutingCIDR: "${CLUSTER_CIDR}" #% if bootstrap_feature_gates.dual_stack_ipv4_first %# -ipv6NativeRoutingCIDR: "#{ bootstrap_pod_network_v6 }#" +ipv6NativeRoutingCIDR: "${CLUSTER_CIDR_V6}" ipv6: enabled: true #% endif %# -#% if bootstrap_distribution in ["k3s"] %# -k8sServiceHost: 127.0.0.1 -k8sServicePort: 6444 -#% elif bootstrap_distribution in ["talos"] %# k8sServiceHost: 127.0.0.1 k8sServicePort: 7445 -#% endif %# kubeProxyReplacement: true kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 l2announcements: diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 index a7869100850..a433f46faa8 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 @@ -20,7 +20,57 @@ spec: cleanupOnFail: true remediation: retries: 3 + valuesFrom: + - kind: ConfigMap + name: cilium-helm-values values: - #% filter indent(width=4, first=True) %# - #% include 'partials/cilium-values-full.partial.yaml.j2' %# - #% endfilter %# + #% if bootstrap_cloudflare.enabled %# + hubble: + enabled: true + metrics: + enabled: + - dns:query + - drop + - tcp + - flow + - port-distribution + - icmp + - http + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium + relay: + enabled: true + rollOutPods: true + prometheus: + serviceMonitor: + enabled: true + ui: + enabled: true + rollOutPods: true + ingress: + enabled: true + className: internal + hosts: ["hubble.${SECRET_DOMAIN}"] + #% endif %# + operator: + prometheus: + enabled: true + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium + prometheus: + enabled: true + serviceMonitor: + enabled: true + trustCRDsExist: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 index 5dd7baca73d..b4f3860b0e2 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 @@ -3,3 +3,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./helmrelease.yaml +configMapGenerator: + - name: cilium-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomizeconfig.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomizeconfig.yaml.j2 new file mode 100644 index 00000000000..58f92ba1530 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomizeconfig.yaml.j2 @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git 
a/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/helm-values.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/helm-values.yaml.j2 new file mode 100644 index 00000000000..56bf2063756 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/helm-values.yaml.j2 @@ -0,0 +1,58 @@ +--- +fullnameOverride: coredns +replicaCount: 1 +k8sAppLabelOverride: kube-dns +serviceAccount: + create: true +service: + name: kube-dns + clusterIP: "#{ bootstrap_service_network | nthhost(10) }#" +servers: + - zones: + - zone: . + scheme: dns:// + use_tcp: true + port: 53 + plugins: + - name: errors + - name: health + configBlock: |- + lameduck 5s + - name: ready + - name: log + configBlock: |- + class error + - name: prometheus + parameters: 0.0.0.0:9153 + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + - name: forward + parameters: . /etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists +tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/instance: coredns diff --git a/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml.j2 new file mode 100644 index 00000000000..85fd31e3536 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/helmrelease.yaml.j2 @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: coredns +spec: + interval: 30m + chart: + spec: + chart: coredns + version: 1.29.0 + sourceRef: + kind: HelmRepository + name: coredns + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: coredns-helm-values diff --git a/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/kustomization.yaml.j2 new file mode 100644 index 00000000000..39444bbd469 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/kustomization.yaml.j2 @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: coredns-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/kustomizeconfig.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/kustomizeconfig.yaml.j2 new file mode 100644 index 00000000000..58f92ba1530 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/coredns/app/kustomizeconfig.yaml.j2 @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: 
v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/coredns/ks.yaml.j2 similarity index 50% rename from bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/kube-system/coredns/ks.yaml.j2 index 7fe74b4af01..766a6c0742e 100644 --- a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/coredns/ks.yaml.j2 @@ -1,20 +1,21 @@ --- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: &app system-upgrade-controller + name: &app coredns namespace: flux-system spec: - targetNamespace: system-upgrade + targetNamespace: kube-system commonMetadata: labels: app.kubernetes.io/name: *app - path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/app - prune: true + path: ./kubernetes/apps/kube-system/coredns/app + prune: false # never should be deleted sourceRef: kind: GitRepository name: home-kubernetes - wait: true + wait: false interval: 30m retryInterval: 1m timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/.mjfilter.py b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/.mjfilter.py deleted file mode 100644 index 0979f9a644c..00000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/.mjfilter.py +++ /dev/null @@ -1 +0,0 @@ -main = lambda data: data.get("bootstrap_distribution", "k3s") in ["k3s"] diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/daemonset.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/daemonset.yaml.j2 deleted file mode 100644 index f62cab4d99f..00000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/daemonset.yaml.j2 +++ /dev/null @@ -1,2 +0,0 @@ ---- -#% include 'partials/kube-vip-ds.partial.yaml.j2' %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/kustomization.yaml.j2 deleted file mode 100644 index cbede828418..00000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/kustomization.yaml.j2 +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - ./rbac.yaml - - ./daemonset.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/rbac.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/rbac.yaml.j2 deleted file mode 100644 index 481c2e822c8..00000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/app/rbac.yaml.j2 +++ /dev/null @@ -1,2 +0,0 @@ ---- -#% include 'partials/kube-vip-rbac.partial.yaml.j2' %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py deleted file mode 100644 index 3ace63dfa7f..00000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py +++ /dev/null @@ -1 +0,0 @@ -main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/partials/kubelet-csr-approver-values.partial.yaml.j2 
b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml.j2 similarity index 96% rename from bootstrap/templates/partials/kubelet-csr-approver-values.partial.yaml.j2 rename to bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml.j2 index 0bf92493cf4..09d175847bd 100644 --- a/bootstrap/templates/partials/kubelet-csr-approver-values.partial.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml.j2 @@ -1,2 +1,3 @@ +--- providerRegex: ^(#{ (bootstrap_node_inventory | map(attribute='name') | join('|')) }#)$ bypassDnsResolution: true diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 index 86aa4904795..f87d6a6a95a 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 @@ -20,10 +20,10 @@ spec: cleanupOnFail: true remediation: retries: 3 + valuesFrom: + - kind: ConfigMap + name: kubelet-csr-approver-helm-values values: - #% filter indent(width=4, first=True) %# - #% include 'partials/kubelet-csr-approver-values.partial.yaml.j2' %# - #% endfilter %# metrics: enable: true serviceMonitor: diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 index 5dd7baca73d..30dddafcbad 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 @@ -3,3 +3,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./helmrelease.yaml +configMapGenerator: + - name: kubelet-csr-approver-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml.j2 new file mode 100644 index 00000000000..58f92ba1530 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml.j2 @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 index 289af80e958..7a71f70fdfc 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 @@ -4,12 +4,8 @@ kind: Kustomization resources: - ./namespace.yaml - ./cilium/ks.yaml + - ./coredns/ks.yaml - ./metrics-server/ks.yaml - ./reloader/ks.yaml - #% if bootstrap_distribution in ["talos"] %# - ./kubelet-csr-approver/ks.yaml - ./spegel/ks.yaml - #% endif %# - #% if bootstrap_distribution in ["k3s"] %# - - ./kube-vip/ks.yaml - #% endif %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 index 64f412e3d0f..60298df6697 
100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 @@ -25,9 +25,6 @@ spec: - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --kubelet-use-node-status-port - --metric-resolution=15s - #% if bootstrap_distribution in ["k3s"] %# - - --kubelet-insecure-tls - #% endif %# metrics: enabled: true serviceMonitor: diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py b/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py deleted file mode 100644 index 3ace63dfa7f..00000000000 --- a/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py +++ /dev/null @@ -1 +0,0 @@ -main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helm-values.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helm-values.yaml.j2 new file mode 100644 index 00000000000..a4185ae3689 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helm-values.yaml.j2 @@ -0,0 +1,7 @@ +--- +spegel: + containerdSock: /run/containerd/containerd.sock + containerdRegistryConfigPath: /etc/cri/conf.d/hosts +service: + registry: + hostPort: 29999 diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 index 5c960bbaa51..4200fa89b77 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 @@ -20,12 +20,9 @@ spec: cleanupOnFail: true remediation: retries: 3 + valuesFrom: + - kind: ConfigMap + name: spegel-helm-values values: - spegel: - containerdSock: /run/containerd/containerd.sock - containerdRegistryConfigPath: /etc/cri/conf.d/hosts - service: - registry: - hostPort: 29999 serviceMonitor: enabled: true diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 index 5dd7baca73d..1e1aa1d17cf 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 @@ -3,3 +3,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./helmrelease.yaml +configMapGenerator: + - name: spegel-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomizeconfig.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomizeconfig.yaml.j2 new file mode 100644 index 00000000000..58f92ba1530 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomizeconfig.yaml.j2 @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/observability/kustomization.yaml.j2 similarity index 63% rename from bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/observability/kustomization.yaml.j2 index e0b2bf29a7e..b213c83e27f 100644 --- 
a/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/observability/kustomization.yaml.j2 @@ -3,5 +3,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - ./namespace.yaml - - ./system-upgrade-controller/ks.yaml - - ./k3s/ks.yaml + - ./prometheus-operator-crds/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/observability/namespace.yaml.j2 similarity index 81% rename from bootstrap/templates/kubernetes/apps/system-upgrade/namespace.yaml.j2 rename to bootstrap/templates/kubernetes/apps/observability/namespace.yaml.j2 index 5ea024ddef4..ce3a5bd22a0 100644 --- a/bootstrap/templates/kubernetes/apps/system-upgrade/namespace.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/observability/namespace.yaml.j2 @@ -2,6 +2,6 @@ apiVersion: v1 kind: Namespace metadata: - name: system-upgrade + name: observability labels: kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/observability/prometheus-operator-crds/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/observability/prometheus-operator-crds/app/helmrelease.yaml.j2 new file mode 100644 index 00000000000..09d293bb8c3 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/observability/prometheus-operator-crds/app/helmrelease.yaml.j2 @@ -0,0 +1,22 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: prometheus-operator-crds +spec: + interval: 30m + chart: + spec: + chart: prometheus-operator-crds + version: 11.0.0 + sourceRef: + kind: HelmRepository + name: prometheus-community + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/observability/prometheus-operator-crds/app/kustomization.yaml.j2 similarity index 77% rename from bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 rename to bootstrap/templates/kubernetes/apps/observability/prometheus-operator-crds/app/kustomization.yaml.j2 index c159f45bcee..5dd7baca73d 100644 --- a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/observability/prometheus-operator-crds/app/kustomization.yaml.j2 @@ -2,4 +2,4 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ./plan.yaml + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/observability/prometheus-operator-crds/ks.yaml.j2 similarity index 62% rename from bootstrap/templates/kubernetes/apps/kube-system/kube-vip/ks.yaml.j2 rename to bootstrap/templates/kubernetes/apps/observability/prometheus-operator-crds/ks.yaml.j2 index fcd2c8add63..ffbb5dcb5a6 100644 --- a/bootstrap/templates/kubernetes/apps/kube-system/kube-vip/ks.yaml.j2 +++ b/bootstrap/templates/kubernetes/apps/observability/prometheus-operator-crds/ks.yaml.j2 @@ -2,15 +2,15 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization metadata: - name: &app kube-vip + name: &app prometheus-operator-crds namespace: flux-system spec: - targetNamespace: kube-system + targetNamespace: observability commonMetadata: labels: app.kubernetes.io/name: *app - path: ./kubernetes/apps/kube-system/kube-vip/app - prune: true + path: 
./kubernetes/apps/observability/prometheus-operator-crds/app + prune: false # never should be deleted sourceRef: kind: GitRepository name: home-kubernetes diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/.mjfilter.py b/bootstrap/templates/kubernetes/apps/system-upgrade/.mjfilter.py deleted file mode 100644 index 0979f9a644c..00000000000 --- a/bootstrap/templates/kubernetes/apps/system-upgrade/.mjfilter.py +++ /dev/null @@ -1 +0,0 @@ -main = lambda data: data.get("bootstrap_distribution", "k3s") in ["k3s"] diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 deleted file mode 100644 index 38784cd5a42..00000000000 --- a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 +++ /dev/null @@ -1,50 +0,0 @@ ---- -apiVersion: upgrade.cattle.io/v1 -kind: Plan -metadata: - name: controllers -spec: - version: "${KUBE_VERSION}" - upgrade: - image: rancher/k3s-upgrade - serviceAccountName: system-upgrade - concurrency: 1 - cordon: true - nodeSelector: - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: Exists - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - - key: node-role.kubernetes.io/control-plane - effect: NoSchedule - operator: Exists - - key: node-role.kubernetes.io/master - effect: NoSchedule - operator: Exists - - key: node-role.kubernetes.io/etcd - effect: NoExecute - operator: Exists - - key: CriticalAddonsOnly - operator: Exists ---- -apiVersion: upgrade.cattle.io/v1 -kind: Plan -metadata: - name: workers -spec: - version: "${KUBE_VERSION}" - serviceAccountName: system-upgrade - concurrency: 1 - nodeSelector: - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: DoesNotExist - prepare: - image: rancher/k3s-upgrade - args: ["prepare", "controllers"] - upgrade: - image: rancher/k3s-upgrade diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 deleted file mode 100644 index 3ee72dac7e2..00000000000 --- a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 +++ /dev/null @@ -1,26 +0,0 @@ ---- -apiVersion: kustomize.toolkit.fluxcd.io/v1 -kind: Kustomization -metadata: - name: &app system-upgrade-k3s - namespace: flux-system -spec: - targetNamespace: system-upgrade - commonMetadata: - labels: - app.kubernetes.io/name: *app - dependsOn: - - name: system-upgrade-controller - path: ./kubernetes/apps/system-upgrade/k3s/app - prune: true - sourceRef: - kind: GitRepository - name: home-kubernetes - wait: false - interval: 30m - retryInterval: 1m - timeout: 5m - postBuild: - substitute: - # renovate: datasource=github-releases depName=k3s-io/k3s - KUBE_VERSION: v1.30.0+k3s1 diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 deleted file mode 100644 index a9e48714ae0..00000000000 --- a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 +++ /dev/null @@ -1,101 +0,0 @@ ---- -apiVersion: helm.toolkit.fluxcd.io/v2 -kind: HelmRelease -metadata: - name: &app system-upgrade-controller -spec: - interval: 30m - chart: - spec: - chart: app-template - version: 3.1.0 - sourceRef: - kind: HelmRepository - name: bjw-s - namespace: flux-system - install: - 
remediation: - retries: 3 - upgrade: - cleanupOnFail: true - remediation: - retries: 3 - values: - controllers: - system-upgrade-controller: - strategy: RollingUpdate - containers: - app: - image: - repository: docker.io/rancher/system-upgrade-controller - tag: v0.13.4 - env: - SYSTEM_UPGRADE_CONTROLLER_DEBUG: false - SYSTEM_UPGRADE_CONTROLLER_THREADS: 2 - SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: 900 - SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: 99 - SYSTEM_UPGRADE_JOB_IMAGE_PULL_POLICY: IfNotPresent - SYSTEM_UPGRADE_JOB_KUBECTL_IMAGE: registry.k8s.io/kubectl:v1.30.1 - SYSTEM_UPGRADE_JOB_PRIVILEGED: true - SYSTEM_UPGRADE_JOB_TTL_SECONDS_AFTER_FINISH: 900 - SYSTEM_UPGRADE_PLAN_POLLING_INTERVAL: 15m - SYSTEM_UPGRADE_CONTROLLER_NAME: *app - SYSTEM_UPGRADE_CONTROLLER_NAMESPACE: - valueFrom: - fieldRef: - fieldPath: metadata.namespace - securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - capabilities: { drop: ["ALL"] } - defaultPodOptions: - securityContext: - runAsNonRoot: true - runAsUser: 65534 - runAsGroup: 65534 - seccompProfile: { type: RuntimeDefault } - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: Exists - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - serviceAccount: - create: true - name: system-upgrade - persistence: - tmp: - type: emptyDir - globalMounts: - - path: /tmp - etc-ssl: - type: hostPath - hostPath: /etc/ssl - hostPathType: DirectoryOrCreate - globalMounts: - - path: /etc/ssl - readOnly: true - etc-pki: - type: hostPath - hostPath: /etc/pki - hostPathType: DirectoryOrCreate - globalMounts: - - path: /etc/pki - readOnly: true - etc-ca-certificates: - type: hostPath - hostPath: /etc/ca-certificates - hostPathType: DirectoryOrCreate - globalMounts: - - path: /etc/ca-certificates - readOnly: true diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 deleted file mode 100644 index 49f35511923..00000000000 --- a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - # renovate: datasource=github-releases depName=rancher/system-upgrade-controller - - https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.4/crd.yaml - - helmrelease.yaml - - rbac.yaml diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 deleted file mode 100644 index 123677c2abf..00000000000 --- a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 +++ /dev/null @@ -1,13 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system-upgrade -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: - - kind: ServiceAccount - name: system-upgrade - namespace: system-upgrade diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/.mjfilter.py 
b/bootstrap/templates/kubernetes/bootstrap/talos/.mjfilter.py deleted file mode 100644 index 3ace63dfa7f..00000000000 --- a/bootstrap/templates/kubernetes/bootstrap/talos/.mjfilter.py +++ /dev/null @@ -1 +0,0 @@ -main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/apps/cilium-values.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/apps/cilium-values.yaml.j2 deleted file mode 100644 index ecaa091764b..00000000000 --- a/bootstrap/templates/kubernetes/bootstrap/talos/apps/cilium-values.yaml.j2 +++ /dev/null @@ -1,4 +0,0 @@ ---- -#% filter indent(width=0, first=True) %# -#% include 'partials/cilium-values-init.partial.yaml.j2' %# -#% endfilter %# diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/apps/helmfile.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/apps/helmfile.yaml.j2 deleted file mode 100644 index 8308db2e62f..00000000000 --- a/bootstrap/templates/kubernetes/bootstrap/talos/apps/helmfile.yaml.j2 +++ /dev/null @@ -1,26 +0,0 @@ ---- -repositories: - - name: cilium - url: https://helm.cilium.io - - name: postfinance - url: https://postfinance.github.io/kubelet-csr-approver - -helmDefaults: - wait: true - waitForJobs: true - timeout: 600 - recreatePods: true - force: true - -releases: - - name: cilium - namespace: kube-system - chart: cilium/cilium - version: 1.15.5 - values: ["./cilium-values.yaml"] - - name: kubelet-csr-approver - namespace: kube-system - chart: postfinance/kubelet-csr-approver - version: 1.1.0 - values: ["./kubelet-csr-approver-values.yaml"] - needs: ["cilium"] diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml.j2 deleted file mode 100644 index d63b98451ff..00000000000 --- a/bootstrap/templates/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml.j2 +++ /dev/null @@ -1,4 +0,0 @@ ---- -#% filter indent(width=0, first=True) %# -#% include 'partials/kubelet-csr-approver-values.partial.yaml.j2' %# -#% endfilter %# diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/helmfile.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/helmfile.yaml.j2 new file mode 100644 index 00000000000..fdf2d4b9233 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/helmfile.yaml.j2 @@ -0,0 +1,43 @@ +--- +repositories: + - name: cilium + url: https://helm.cilium.io + - name: postfinance + url: https://postfinance.github.io/kubelet-csr-approver + +helmDefaults: + wait: true + waitForJobs: true + timeout: 600 + recreatePods: true + force: true + +releases: + - name: prometheus-operator-crds + namespace: observability + chart: oci://ghcr.io/prometheus-community/charts/prometheus-operator-crds + version: 11.0.0 + - name: cilium + namespace: kube-system + chart: cilium/cilium + version: 1.15.5 + values: ["../../../apps/kube-system/cilium/app/helm-values.yaml"] + needs: ["prometheus-operator-crds"] + - name: coredns + namespace: kube-system + chart: coredns/coredns + version: 1.29.0 + values: ["../../../apps/kube-system/coredns/app/helm-values.yaml"] + needs: ["prometheus-operator-crds", "cilium"] + - name: kubelet-csr-approver + namespace: kube-system + chart: postfinance/kubelet-csr-approver + version: 1.1.0 + values: ["../../../apps/kube-system/kubelet-csr-approver/app/helm-values.yaml"] + needs: ["prometheus-operator-crds", "cilium", "coredns"] + - name: spegel + namespace: kube-system + chart: 
oci://ghcr.io/spegel-org/helm-charts/spegel + version: v0.0.22 + values: ["../../../apps/kube-system/spegel/app/helm-values.yaml"] + needs: ["prometheus-operator-crds", "cilium", "coredns", "kubelet-csr-approver"] diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 index 084030c89c2..7ecf03c45e8 100644 --- a/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 +++ b/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 @@ -6,13 +6,13 @@ talosVersion: v1.7.2 kubernetesVersion: v1.30.1 clusterName: &cluster #{ bootstrap_cluster_name|default('home-kubernetes', true) }# -endpoint: https://#{ bootstrap_controllers_vip }#:6443 +endpoint: https://#{ bootstrap_controller_vip }#:6443 clusterPodNets: - "#{ bootstrap_pod_network.split(',')[0] }#" clusterSvcNets: - "#{ bootstrap_service_network.split(',')[0] }#" additionalApiServerCertSans: &sans - - "#{ bootstrap_controllers_vip }#" + - "#{ bootstrap_controller_vip }#" - 127.0.0.1 # KubePrism #% for item in bootstrap_tls_sans %# - "#{ item }#" @@ -25,30 +25,29 @@ nodes: #% for item in bootstrap_node_inventory %# - hostname: "#{ item.name }#" ipAddress: "#{ item.address }#" - #% if item.talos_disk.startswith('/') %# - installDisk: "#{ item.talos_disk }#" + #% if item.disk.startswith('/') %# + installDisk: "#{ item.disk }#" #% else %# installDiskSelector: - serial: "#{ item.talos_disk }#" + serial: "#{ item.disk }#" #% endif %# - #% if bootstrap_talos.secureboot.enabled %# + #% if bootstrap_secureboot.enabled %# machineSpec: secureboot: true - talosImageURL: factory.talos.dev/installer-secureboot/#{ bootstrap_talos.schematic_id }# + talosImageURL: factory.talos.dev/installer-secureboot/#{ bootstrap_schematic_id }# #% else %# - talosImageURL: factory.talos.dev/installer/#{ bootstrap_talos.schematic_id }# + talosImageURL: factory.talos.dev/installer/#{ bootstrap_schematic_id }# #% endif %# controlPlane: #{ (item.controller) | string | lower }# networkInterfaces: - deviceSelector: - hardwareAddr: "#{ item.talos_nic | lower }#" - dhcp: false - #% if bootstrap_talos.vlan %# + hardwareAddr: "#{ item.mac_addr | lower }#" + #% if bootstrap_vlan %# vlans: - - vlanId: #{ bootstrap_talos.vlan }# + - vlanId: #{ bootstrap_vlan }# addresses: - "#{ item.address }#/#{ bootstrap_node_network.split('/') | last }#" - mtu: 1500 + mtu: #{ item.mtu | default(1500) }# routes: - network: 0.0.0.0/0 #% if bootstrap_node_default_gateway %# @@ -58,12 +57,13 @@ nodes: #% endif %# #% if item.controller %# vip: - ip: "#{ bootstrap_controllers_vip }#" + ip: "#{ bootstrap_controller_vip }#" #% endif %# #% else %# + #% if item.address %# + dhcp: false addresses: - "#{ item.address }#/#{ bootstrap_node_network.split('/') | last }#" - mtu: 1500 routes: - network: 0.0.0.0/0 #% if bootstrap_node_default_gateway %# @@ -71,12 +71,16 @@ nodes: #% else %# gateway: "#{ bootstrap_node_network | nthhost(1) }#" #% endif %# + #% else %# + dhcp: true + #% endif %# + mtu: #{ item.mtu | default(1500) }# #% if item.controller %# vip: - ip: "#{ bootstrap_controllers_vip }#" + ip: "#{ bootstrap_controller_vip }#" #% endif %# #% endif %# - #% if bootstrap_talos.user_patches %# + #% if bootstrap_user_patches %# patches: - "@./patches/node_#{ item.name }#.yaml" #% endif %# @@ -123,21 +127,28 @@ patches: nodeIP: validSubnets: ["#{ bootstrap_node_network }#"] + #% if bootstrap_dns_servers | length %# # Force nameserver - |- machine: network: nameservers: - #% for item in bootstrap_dns_servers | 
default(['1.1.1.1', '1.0.0.1']) %# + #% for item in bootstrap_dns_servers %# - #{ item }# #% endfor %# + #% endif %# + #% if bootstrap_ntp_servers | length %# # Configure NTP - |- machine: time: disabled: false - servers: ["time.cloudflare.com"] + servers: + #% for item in bootstrap_ntp_servers %# + - #{ item }# + #% endfor %# + #% endif %# # Custom sysctl settings - |- @@ -159,7 +170,7 @@ patches: source: /var/openebs/local options: ["bind", "rshared", "rw"] - #% if bootstrap_talos.secureboot.enabled and bootstrap_talos.secureboot.encrypt_disk_with_tpm %# + #% if bootstrap_secureboot.enabled and bootstrap_secureboot.encrypt_disk_with_tpm %# # Encrypt system disk with TPM - |- machine: @@ -176,7 +187,7 @@ patches: tpm: {} #% endif %# - #% if bootstrap_talos.user_patches %# + #% if bootstrap_user_patches %# # User specified global patches - "@./patches/global.yaml" #% endif %# @@ -218,13 +229,11 @@ controlPlane: enabled: true allowedRoles: ["os:admin"] allowedKubernetesNamespaces: ["system-upgrade"] - - #% if bootstrap_talos.user_patches %# + #% if bootstrap_user_patches %# # User specified controlPlane patches - "@./patches/controlPlane.yaml" #% endif %# - -#% if ((bootstrap_talos.user_patches) and (bootstrap_node_inventory | selectattr('controller', 'equalto', False) | list | length)) %# +#% if ((bootstrap_user_patches) and (bootstrap_node_inventory | selectattr('controller', 'equalto', False) | list | length)) %# worker: patches: # User specified worker patches diff --git a/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 b/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 index b6889a4c305..4f9bb975b95 100644 --- a/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 @@ -24,7 +24,7 @@ spec: kind: OCIRepository name: flux-manifests patches: - # Remove the network policies that does not work with k3s + # Remove the network policies - patch: | $patch: delete apiVersion: networking.k8s.io/v1 diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/coredns.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/coredns.yaml.j2 new file mode 100644 index 00000000000..3bdbbafbebf --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/coredns.yaml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: coredns + namespace: flux-system +spec: + interval: 1h + url: https://coredns.github.io/helm diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 index 2207b7737cf..71a5903c463 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 @@ -4,16 +4,16 @@ kind: Kustomization resources: - ./bjw-s.yaml - ./cilium.yaml - #% if bootstrap_cloudflare.enabled %# - - ./external-dns.yaml - - ./ingress-nginx.yaml - - ./k8s-gateway.yaml - #% endif %# + - ./coredns.yaml - ./jetstack.yaml - ./metrics-server.yaml - ./openebs.yaml - #% if bootstrap_distribution in ["talos"] %# - ./postfinance.yaml + - ./prometheus-community.yaml - ./spegel.yaml - #% endif %# - ./stakater.yaml + #% if bootstrap_cloudflare.enabled %# + - ./external-dns.yaml + - ./ingress-nginx.yaml + - ./k8s-gateway.yaml + #% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 
b/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 index 390e6b70874..b14a64d8e73 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 @@ -1,4 +1,3 @@ -#% if bootstrap_distribution in ["talos"] %# --- apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository @@ -8,4 +7,3 @@ metadata: spec: interval: 1h url: https://postfinance.github.io/kubelet-csr-approver -#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/prometheus-community.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/prometheus-community.yaml.j2 new file mode 100644 index 00000000000..78c4f0c0fd5 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/prometheus-community.yaml.j2 @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: prometheus-community + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/prometheus-community/charts diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 index c43e0b2686c..d9a8b2cd300 100644 --- a/bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/spegel.yaml.j2 @@ -1,4 +1,3 @@ -#% if bootstrap_distribution in ["talos"] %# --- apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository @@ -9,4 +8,3 @@ spec: type: oci interval: 5m url: oci://ghcr.io/spegel-org/helm-charts -#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 index f176c7f558d..597f9878b11 100644 --- a/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 +++ b/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 @@ -5,7 +5,6 @@ metadata: name: cluster-settings namespace: flux-system data: - TIMEZONE: "#{ bootstrap_timezone }#" CLUSTER_CIDR: "#{ bootstrap_pod_network.split(',')[0] }#" NODE_CIDR: "#{ bootstrap_node_network }#" #% if bootstrap_feature_gates.dual_stack_ipv4_first %# diff --git a/bootstrap/templates/partials/cilium-values-full.partial.yaml.j2 b/bootstrap/templates/partials/cilium-values-full.partial.yaml.j2 deleted file mode 100644 index 7553cf1b966..00000000000 --- a/bootstrap/templates/partials/cilium-values-full.partial.yaml.j2 +++ /dev/null @@ -1,129 +0,0 @@ -autoDirectNodeRoutes: true -bgpControlPlane: - enabled: true -bpf: - masquerade: false -cgroup: - automount: - enabled: false - hostRoot: /sys/fs/cgroup -cluster: - id: 1 - name: #{ bootstrap_cluster_name|default('home-kubernetes', true) }# -cni: - exclusive: false - #% if bootstrap_distribution in ["k3s"] %# - binPath: /var/lib/rancher/k3s/data/current/bin - confPath: /var/lib/rancher/k3s/agent/etc/cni/net.d - #% endif %# -containerRuntime: - integration: containerd - #% if bootstrap_distribution in ["k3s"] %# - socketPath: /var/run/k3s/containerd/containerd.sock - #% endif %# -# NOTE: devices might need to be set if you have more than one active NIC on your hosts -# devices: eno+ eth+ -endpointRoutes: - enabled: true -#% if bootstrap_cloudflare.enabled %# -hubble: - enabled: true - metrics: - enabled: - - dns:query - - drop - - tcp - - flow - - port-distribution - - 
icmp - - http - serviceMonitor: - enabled: true - dashboards: - enabled: true - annotations: - grafana_folder: Cilium - relay: - enabled: true - rollOutPods: true - prometheus: - serviceMonitor: - enabled: true - ui: - enabled: true - rollOutPods: true - ingress: - enabled: true - className: internal - hosts: ["hubble.${SECRET_DOMAIN}"] -#% else %# -hubble: - enabled: false -#% endif %# -ipam: - mode: kubernetes -ipv4NativeRoutingCIDR: "${CLUSTER_CIDR}" -#% if bootstrap_feature_gates.dual_stack_ipv4_first %# -ipv6NativeRoutingCIDR: "${CLUSTER_CIDR_V6}" -ipv6: - enabled: true -#% endif %# -#% if bootstrap_distribution in ["k3s"] %# -k8sServiceHost: 127.0.0.1 -k8sServicePort: 6444 -#% elif bootstrap_distribution in ["talos"] %# -k8sServiceHost: 127.0.0.1 -k8sServicePort: 7445 -#% endif %# -kubeProxyReplacement: true -kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 -l2announcements: - #% if ((bootstrap_bgp.enabled) or (bootstrap_feature_gates.dual_stack_ipv4_first)) %# - enabled: false # https://github.com/cilium/cilium/issues/28985 - #% else %# - enabled: true - #% endif %# -loadBalancer: - algorithm: maglev - mode: snat -localRedirectPolicy: true -operator: - replicas: 1 - rollOutPods: true - prometheus: - enabled: true - serviceMonitor: - enabled: true - dashboards: - enabled: true - annotations: - grafana_folder: Cilium -prometheus: - enabled: true - serviceMonitor: - enabled: true - trustCRDsExist: true -dashboards: - enabled: true - annotations: - grafana_folder: Cilium -rollOutCiliumPods: true -routingMode: native -securityContext: - capabilities: - ciliumAgent: - - CHOWN - - KILL - - NET_ADMIN - - NET_RAW - - IPC_LOCK - - SYS_ADMIN - - SYS_RESOURCE - - DAC_OVERRIDE - - FOWNER - - SETGID - - SETUID - cleanCiliumState: - - NET_ADMIN - - SYS_ADMIN - - SYS_RESOURCE diff --git a/bootstrap/templates/partials/kube-vip-ds.partial.yaml.j2 b/bootstrap/templates/partials/kube-vip-ds.partial.yaml.j2 deleted file mode 100644 index 9d77947cd9a..00000000000 --- a/bootstrap/templates/partials/kube-vip-ds.partial.yaml.j2 +++ /dev/null @@ -1,74 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: kube-vip - namespace: kube-system - labels: - app.kubernetes.io/name: kube-vip -spec: - selector: - matchLabels: - app.kubernetes.io/name: kube-vip - template: - metadata: - labels: - app.kubernetes.io/name: kube-vip - spec: - containers: - - name: kube-vip - image: ghcr.io/kube-vip/kube-vip:v0.8.0 - imagePullPolicy: IfNotPresent - args: ["manager"] - env: - - name: address - value: "#{ bootstrap_controllers_vip }#" - - name: vip_arp - value: "true" - - name: lb_enable - value: "true" - - name: port - value: "6443" - - name: vip_cidr - value: "32" - - name: cp_enable - value: "true" - - name: cp_namespace - value: kube-system - - name: vip_ddns - value: "false" - - name: svc_enable - value: "false" - - name: vip_leaderelection - value: "true" - - name: vip_leaseduration - value: "15" - - name: vip_renewdeadline - value: "10" - - name: vip_retryperiod - value: "2" - - name: prometheus_server - value: :2112 - securityContext: - capabilities: - add: ["NET_ADMIN", "NET_RAW", "SYS_TIME"] - hostAliases: - - hostnames: - - kubernetes - ip: 127.0.0.1 - hostNetwork: true - serviceAccountName: kube-vip - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node-role.kubernetes.io/master - operator: Exists - - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: Exists - tolerations: - - effect: NoSchedule - 
diff --git a/bootstrap/templates/partials/kube-vip-rbac.partial.yaml.j2 b/bootstrap/templates/partials/kube-vip-rbac.partial.yaml.j2
deleted file mode 100644
index d6ecc93677e..00000000000
--- a/bootstrap/templates/partials/kube-vip-rbac.partial.yaml.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kube-vip
-  namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  annotations:
-    rbac.authorization.kubernetes.io/autoupdate: "true"
-  name: system:kube-vip-role
-rules:
-  - apiGroups: [""]
-    resources: ["services/status"]
-    verbs: ["update"]
-  - apiGroups: [""]
-    resources: ["services", "endpoints"]
-    verbs: ["list","get","watch", "update"]
-  - apiGroups: [""]
-    resources: ["nodes"]
-    verbs: ["list","get","watch", "update", "patch"]
-  - apiGroups: ["coordination.k8s.io"]
-    resources: ["leases"]
-    verbs: ["list", "get", "watch", "update", "create"]
-  - apiGroups: ["discovery.k8s.io"]
-    resources: ["endpointslices"]
-    verbs: ["list","get","watch", "update"]
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: system:kube-vip-binding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:kube-vip-role
-subjects:
-- kind: ServiceAccount
-  name: kube-vip
-  namespace: kube-system
diff --git a/config.sample.yaml b/config.sample.yaml
index d40840af7ad..a20d9d8cae0 100644
--- a/config.sample.yaml
+++ b/config.sample.yaml
@@ -4,64 +4,29 @@
 # 1. (Required) Cluster details - Cluster represents the Kubernetes cluster layer and any additional customizations
 #
-# (Required) Timezone is your IANA formatted timezone (e.g. America/New_York)
-bootstrap_timezone: ""
-
-# (Required) Distribution can either be k3s or talos
-bootstrap_distribution: k3s
-
 # (Optional) Cluster name; affects Cilium and Talos
+# Default is "home-kubernetes"
 bootstrap_cluster_name: ""
 
-# (Required: Talos) Talos Specific Options
-bootstrap_talos:
-  # (Optional: Talos) Go to https://factory.talos.dev/ and choose any System Extensions, and/or add kernel arguments you need.
-  # Copy the generated schematic id and paste it below.
-  # IMPORTANT: The default ID given here means no System Extensions or Kernel args will be used.
-  schematic_id: "376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba"
-  # (Optional: Talos) Add vlan tag to network master device, this is not needed if you tag ports on your switch with the VLAN
-  # See: https://www.talos.dev/latest/advanced/advanced-networking/#vlans
-  vlan: ""
-  # (Optional: Talos) Secureboot and TPM-based disk encryption
-  secureboot:
-    # (Optional) Enable secureboot on UEFI systems. Not supported on x86 platforms in BIOS mode.
-    # See: https://www.talos.dev/latest/talos-guides/install/bare-metal-platforms/secureboot
-    enabled: false
-    # (Optional) Enable TPM-based disk encryption. Requires TPM 2.0
-    # See: https://www.talos.dev/v1.6/talos-guides/install/bare-metal-platforms/secureboot/#disk-encryption-with-tpm
-    encrypt_disk_with_tpm: false
-  # (Optional) Add includes for user provided patches to generated talconfig.yaml.
-  # See: https://github.com/budimanjojo/talhelper/blob/179ba9ed42f70069c7842109bea24f769f7af6eb/example/extraKernelArgs-patch.yaml
-  # Patches are applied in this order. (global overrides cp/worker which overrides node-specific).
-  # Create these files to allow talos:bootstrap-genconfig to complete (empty files are ok).
-  # kubernetes/bootstrap/talos/patches/node_.yaml        # Patches for individual nodes
-  # kubernetes/bootstrap/talos/patches/controlPlane.yaml # Patches for controlplane nodes
-  # kubernetes/bootstrap/talos/patches/worker.yaml       # Patches for worker nodes
-  # kubernetes/bootstrap/talos/patches/global.yaml       # Patches for ALL nodes
-  user_patches: false
+# (Required) Generated schematic id from https://factory.talos.dev/
+bootstrap_schematic_id: ""
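With the nested `bootstrap_talos.schematic_id` flattened into a required top-level key, a filled-in value is simply the id copied from https://factory.talos.dev/. As a point of reference, the default id quoted in the removed block corresponds to a vanilla image with no System Extensions or kernel args:

```yaml
# Example value: the "vanilla" schematic id carried by the removed
# bootstrap_talos block (no System Extensions, no extra kernel args).
bootstrap_schematic_id: "376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba"
```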
 
 # (Required) The CIDR your nodes are on (e.g. 192.168.1.0/24)
 bootstrap_node_network: ""
 
-# (Optional) The default gateway for the nodes
-# Default is .1 derrived from bootstrap_node_network: 'x.x.x.1'
-bootstrap_node_default_gateway: ""
-
 # (Required) Use only 1, 3 or more ODD number of controller nodes, recommended is 3
 # Worker nodes are optional
 bootstrap_node_inventory: []
-  # - name: ""          # (Required) Name of the node (must match [a-z0-9-\.]+)
-  #   address: ""       # (Required) IP address of the node
-  #   controller: true  # (Required) Set to true if this is a controller node
-  #   talos_disk: ""    # (Required: Talos) Device path or serial number of the disk for this node (talosctl disks -n --insecure)
-  #   talos_nic: ""     # (Required: Talos) MAC address of the NIC for this node (talosctl get links -n --insecure)
-  #   ssh_user: ""      # (Required: k3s) SSH username of the node
-  #   ssh_key: ""       # (Optional: k3s) Set specific SSH key for this node
+  # - name: ""          # (Required) Name of the node (must match [a-z0-9-\.]+)
+  #   address: ""       # (Optional) IP address of the node (Remove if node has a static DHCP reservation)
+  #   controller: true  # (Required) Set to true if this is a controller node
+  #   disk: ""          # (Required) Device path or serial number of the disk for this node (talosctl disks -n --insecure)
+  #   mac_addr: ""      # (Required) MAC address of the NIC for this node (talosctl get links -n --insecure)
+  #   mtu: ""           # (Optional) MTU for the NIC, default is 1500
   # ...
 
-# (Optional) The DNS server to use for the cluster, this can be an existing
-# local DNS server or a public one.
-# Default is ["1.1.1.1", "1.0.0.1"]
+# (Optional) The DNS servers to use for the cluster nodes.
+# Default is pulled from your DHCP server.
 # If using a local DNS server make sure it meets the following requirements:
 #   1. your nodes can reach it
 #   2. it is configured to forward requests to a public DNS server
@@ -70,10 +35,9 @@ bootstrap_node_inventory: []
 # guarantee that the first DNS server will always be used for every lookup.
 bootstrap_dns_servers: []
 
-# (Optional) The DNS search domain to use for the nodes.
-# Default is "."
-# Use the default or leave empty to avoid possible DNS issues inside the cluster.
-bootstrap_search_domain: ""
+# (Optional) The NTP servers to use for the cluster nodes.
+# Default is pulled from your DHCP server.
+bootstrap_ntp_servers: []
 
 # (Required) The pod CIDR for the cluster, this must NOT overlap with any
 # existing networks and is usually a /16 (64K IPs).
@@ -89,13 +53,20 @@ bootstrap_service_network: "10.96.0.0/16"
 
 # (Required) The IP address of the Kube API, choose an available IP in
 # your nodes host network that is NOT being used. This is announced over L2.
-# For k3s kube-vip is used, built-in functionality is used with Talos
-bootstrap_controllers_vip: ""
+bootstrap_controller_vip: ""
 
 # (Optional) Add additional SANs to the Kube API cert, this is useful
 # if you want to call the Kube API by hostname rather than IP
 bootstrap_tls_sans: []
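Filled in under the new schema, the node inventory commented above might look like the following sketch (hostnames, addresses, disks, and MACs are invented placeholders; real `disk` and `mac_addr` values come from the `talosctl` commands noted in the comments):

```yaml
# Sketch only: a three-controller inventory using the new key names.
bootstrap_node_inventory:
  - name: "k8s-0"            # placeholder hostname
    address: "192.168.1.10"  # omit if the node has a static DHCP reservation
    controller: true
    disk: "/dev/nvme0n1"     # placeholder device path
    mac_addr: "aa:bb:cc:dd:ee:01"
  - name: "k8s-1"
    address: "192.168.1.11"
    controller: true
    disk: "/dev/nvme0n1"
    mac_addr: "aa:bb:cc:dd:ee:02"
  - name: "k8s-2"
    address: "192.168.1.12"
    controller: true
    disk: "/dev/nvme0n1"
    mac_addr: "aa:bb:cc:dd:ee:03"
```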
 
+# (Optional) The default gateway for the nodes
+# Default is .1 which is derived from bootstrap_node_network (e.g. 192.168.1.1)
+bootstrap_node_default_gateway: ""
+
+# (Optional) Add a vlan tag to the network master device; this is not needed if you tag ports on your switch with the VLAN
+# See: https://www.talos.dev/latest/advanced/advanced-networking/#vlans
+bootstrap_vlan: ""
+
 # (Required) Age Public Key (e.g. age1...)
 # 1. Generate a new key with the following command:
 #    > task sops:age-keygen
@@ -119,6 +90,25 @@ bootstrap_bgp:
   # If you want to use IPv6 check the advanced flags below
   advertised_network: ""
 
+# (Optional) Secureboot and TPM-based disk encryption
+bootstrap_secureboot:
+  # (Optional) Enable secureboot on UEFI systems. Not supported on x86 platforms in BIOS mode.
+  # See: https://www.talos.dev/latest/talos-guides/install/bare-metal-platforms/secureboot
+  enabled: false
+  # (Optional) Enable TPM-based disk encryption. Requires TPM 2.0
+  # See: https://www.talos.dev/v1.6/talos-guides/install/bare-metal-platforms/secureboot/#disk-encryption-with-tpm
+  encrypt_disk_with_tpm: false
+
+# (Optional) Add includes for user provided patches to generated talconfig.yaml.
+# See: https://github.com/budimanjojo/talhelper/blob/179ba9ed42f70069c7842109bea24f769f7af6eb/example/extraKernelArgs-patch.yaml
+# Patches are applied in this order. (global overrides cp/worker which overrides node-specific).
+# Create these files to allow talos:bootstrap-genconfig to complete (empty files are ok).
+# kubernetes/bootstrap/talos/patches/node_.yaml        # Patches for individual nodes
+# kubernetes/bootstrap/talos/patches/controlPlane.yaml # Patches for controlplane nodes
+# kubernetes/bootstrap/talos/patches/worker.yaml       # Patches for worker nodes
+# kubernetes/bootstrap/talos/patches/global.yaml       # Patches for ALL nodes
+bootstrap_user_patches: false
+
 #
 # 2. (Required) Flux details - Flux is used to manage the cluster configuration.
 #
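The relocated `bootstrap_user_patches` flag still points at talhelper-style patch files, applied in the order noted above (global overriding cp/worker, which overrides node-specific). A minimal, hypothetical example of a patch for all nodes (any valid Talos machine-config fragment works, and empty files are accepted):

```yaml
# kubernetes/bootstrap/talos/patches/global.yaml (hypothetical example)
# Applied to ALL nodes; raises an inotify limit via Talos machine.sysctls.
machine:
  sysctls:
    fs.inotify.max_user_watches: "1048576"
```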