diff --git a/.devcontainer/ci/Dockerfile b/.devcontainer/ci/Dockerfile new file mode 100644 index 00000000..e6e945b4 --- /dev/null +++ b/.devcontainer/ci/Dockerfile @@ -0,0 +1,2 @@ +# Ref: https://github.com/devcontainers/ci/issues/191 +FROM mcr.microsoft.com/devcontainers/base:alpine diff --git a/.devcontainer/ci/devcontainer.json b/.devcontainer/ci/devcontainer.json new file mode 100644 index 00000000..b38aa2b5 --- /dev/null +++ b/.devcontainer/ci/devcontainer.json @@ -0,0 +1,27 @@ +{ + "$schema": "https://raw.githubusercontent.com/devcontainers/spec/main/schemas/devContainer.schema.json", + "name": "Flux Cluster Template (CI)", + "build": { + "dockerfile": "./Dockerfile", + "context": "." + }, + "features": { + "./features": {} + }, + "customizations": { + "vscode": { + "settings": { + "terminal.integrated.profiles.linux": { + "bash": { + "path": "/usr/bin/fish" + } + }, + "terminal.integrated.defaultProfile.linux": "fish" + }, + "extensions": [ + "redhat.ansible", + "redhat.vscode-yaml" + ] + } + } +} diff --git a/.devcontainer/ci/features/devcontainer-feature.json b/.devcontainer/ci/features/devcontainer-feature.json new file mode 100644 index 00000000..5f771e34 --- /dev/null +++ b/.devcontainer/ci/features/devcontainer-feature.json @@ -0,0 +1,6 @@ +{ + "name": "Flux Cluster Template (Tools)", + "id": "cluster-template", + "version": "1.0.0", + "description": "Install Tools" +} diff --git a/.devcontainer/ci/features/install.sh b/.devcontainer/ci/features/install.sh new file mode 100644 index 00000000..82bd4162 --- /dev/null +++ b/.devcontainer/ci/features/install.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +set -e +set -o noglob + +apk add --no-cache \ + bash bind-tools ca-certificates curl python3 \ + py3-pip moreutils jq git iputils openssh-client \ + starship fzf fish + +apk add --no-cache \ + --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community \ + age helm kubectl sops + +sudo apk add --no-cache \ + 
--repository=https://dl-cdn.alpinelinux.org/alpine/edge/testing \ + lsd + +for app in \ + "budimanjojo/talhelper!" \ + "cilium/cilium-cli!!?as=cilium&type=script" \ + "cli/cli!!?as=gh&type=script" \ + "cloudflare/cloudflared!!?as=cloudflared&type=script" \ + "derailed/k9s!!?as=k9s&type=script" \ + "direnv/direnv!!?as=direnv&type=script" \ + "fluxcd/flux2!!?as=flux&type=script" \ + "go-task/task!!?as=task&type=script" \ + "helmfile/helmfile!!?as=helmfile&type=script" \ + "kubecolor/kubecolor!!?as=kubecolor&type=script" \ + "kubernetes-sigs/krew!!?as=krew&type=script" \ + "kubernetes-sigs/kustomize!!?as=kustomize&type=script" \ + "stern/stern!!?as=stern&type=script" \ + "siderolabs/talos!!?as=talosctl&type=script" \ + "yannh/kubeconform!!?as=kubeconform&type=script" \ + "mikefarah/yq!!?as=yq&type=script" +do + echo "=== Installing ${app} ===" + curl -fsSL "https://i.jpillora.com/${app}" | bash +done + +# Create the fish configuration directory +mkdir -p /home/vscode/.config/fish/{completions,conf.d} + +# Setup autocompletions for fish +for tool in cilium flux helm helmfile k9s kubectl kustomize talhelper talosctl; do + $tool completion fish > /home/vscode/.config/fish/completions/$tool.fish +done +gh completion --shell fish > /home/vscode/.config/fish/completions/gh.fish +stern --completion fish > /home/vscode/.config/fish/completions/stern.fish +yq shell-completion fish > /home/vscode/.config/fish/completions/yq.fish + +# Add hooks into fish +tee /home/vscode/.config/fish/conf.d/hooks.fish > /dev/null < /dev/null < /dev/null < /dev/null <\\d+)\\.(?\\d+)\\.(?\\d+)(?\\+k.s)\\.?(?\\d+)$", + "matchPackagePatterns": ["k3s"] + }, + // commit message topics + { + "matchDatasources": ["helm"], + "commitMessageTopic": "chart {{depName}}" + }, + { + "matchDatasources": ["docker"], + "commitMessageTopic": "image {{depName}}" + }, + // commit messages + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(container)!: " + }, + { + 
"matchDatasources": ["docker"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["digest"], + "semanticCommitType": "chore", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(helm)!: " + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "helm" + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "helm" + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(ansible)!: " + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "ansible" + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "ansible" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(github-release)!: " + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "github-release" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "github-release" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(github-action)!: " + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["minor"], + 
"semanticCommitType": "feat", + "semanticCommitScope": "github-action" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "github-action" + }, + // labels + { + "matchUpdateTypes": ["major"], + "labels": ["type/major"] + }, + { + "matchUpdateTypes": ["minor"], + "labels": ["type/minor"] + }, + { + "matchUpdateTypes": ["patch"], + "labels": ["type/patch"] + }, + { + "matchDatasources": ["docker"], + "addLabels": ["renovate/container"] + }, + { + "matchDatasources": ["helm"], + "addLabels": ["renovate/helm"] + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "addLabels": ["renovate/ansible"] + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "addLabels": ["renovate/github-release"] + }, + { + "matchManagers": ["github-actions"], + "addLabels": ["renovate/github-action"] + } + ], + // custom managers + "customManagers": [ + { + "customType": "regex", + "description": ["Process custom dependencies"], + "fileMatch": [ + "(^|/).taskfiles/.+\\.ya?ml$", + "(^|/)ansible/.+\\.ya?ml(\\.j2)?$", + "(^|/)kubernetes/.+\\.ya?ml(\\.j2)?$" + ], + "matchStrings": [ + // # renovate: datasource=github-releases depName=k3s-io/k3s + // k3s_release_version: v1.29.0+k3s1 + "(?m:# renovate: datasource=(?\\S+) depName=(?\\S+)( repository=(?\\S+))?\\n.+?: \"?(?\\S+?)\"?$)", + // # renovate: datasource=github-releases depName=rancher/system-upgrade-controller + // https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.2/crd.yaml + "(?m:# renovate: datasource=(?\\S+) depName=(?\\S+)\\n.+?\/(?(v|\\d)[^\/]+)\\S+$)" + ], + "datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}" + } + ] +} diff --git a/.github/tests/config-k3s-ipv4.yaml b/.github/tests/config-k3s-ipv4.yaml new file mode 100644 index 00000000..7948fee5 --- /dev/null +++ b/.github/tests/config-k3s-ipv4.yaml @@ -0,0 +1,42 @@ +--- +skip_tests: true + 
+bootstrap_timezone: Etc/UTC +bootstrap_distribution: k3s +bootstrap_node_network: 10.10.10.0/24 +bootstrap_node_default_gateway: 10.10.10.1 +bootstrap_node_inventory: +  - name: k8s-controller-0 +    address: 10.10.10.100 +    controller: true +    ssh_user: fake +  - name: k8s-worker-0 +    address: 10.10.10.101 +    controller: false +    ssh_user: fake +bootstrap_dns_servers: ["1.1.1.1"] +bootstrap_search_domain: "fake" +bootstrap_pod_network: 10.69.0.0/16 +bootstrap_service_network: 10.96.0.0/16 +bootstrap_controllers_vip: 10.10.10.254 +bootstrap_tls_sans: ["fake"] +bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY +bootstrap_bgp: +  enabled: false +bootstrap_github_address: https://github.com/onedr0p/cluster-template +bootstrap_github_branch: main +bootstrap_github_webhook_token: fake +bootstrap_cloudflare: +  enabled: true +  domain: fake +  token: fake +  acme: +    email: fake@example.com +    production: false +  tunnel: +    account_id: fake +    id: fake +    secret: fake +    ingress_vip: 10.10.10.252 +  ingress_vip: 10.10.10.251 +  gateway_vip: 10.10.10.253 diff --git a/.github/tests/config-k3s-ipv6.yaml b/.github/tests/config-k3s-ipv6.yaml new file mode 100644 index 00000000..5efa50c6 --- /dev/null +++ b/.github/tests/config-k3s-ipv6.yaml @@ -0,0 +1,42 @@ +--- +skip_tests: true + +bootstrap_timezone: Etc/UTC +bootstrap_distribution: k3s +bootstrap_node_network: 10.10.10.0/24 +bootstrap_node_default_gateway: 10.10.10.1 +bootstrap_node_inventory: +  - name: k8s-controller-0 +    address: 10.10.10.100 +    controller: true +    ssh_user: fake +  - name: k8s-worker-0 +    address: 10.10.10.101 +    controller: false +    ssh_user: fake +bootstrap_dns_servers: ["1.1.1.1"] +bootstrap_search_domain: "fake" +bootstrap_pod_network: 10.42.0.0/16,fd7f:8f5:e87c:a::/64 +bootstrap_service_network: 10.43.0.0/16,fd7f:8f5:e87c:e::/112 +bootstrap_controllers_vip: 10.10.10.254 +bootstrap_tls_sans: ["fake"] +bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY +bootstrap_bgp: +  enabled: false +bootstrap_github_address: 
https://github.com/onedr0p/cluster-template +bootstrap_github_branch: main +bootstrap_github_webhook_token: fake +bootstrap_cloudflare: +  enabled: true +  domain: fake +  token: fake +  acme: +    email: fake@example.com +    production: false +  tunnel: +    account_id: fake +    id: fake +    secret: fake +    ingress_vip: 10.10.10.252 +  ingress_vip: 10.10.10.251 +  gateway_vip: 10.10.10.253 diff --git a/.github/tests/config-talos.yaml b/.github/tests/config-talos.yaml new file mode 100644 index 00000000..545a5469 --- /dev/null +++ b/.github/tests/config-talos.yaml @@ -0,0 +1,44 @@ +--- +skip_tests: true + +bootstrap_timezone: Etc/UTC +bootstrap_distribution: talos +boostrap_talos: +  schematic_id: "df491c50a5acc05b977ef00c32050e1ceb0df746e40b33c643ac8a9bfb7c7263" +bootstrap_node_network: 10.10.10.0/24 +bootstrap_node_default_gateway: 10.10.10.1 +bootstrap_node_inventory: +  - name: k8s-controller-0 +    address: 10.10.10.100 +    controller: true +    talos_disk: fake +  - name: k8s-worker-0 +    address: 10.10.10.101 +    controller: false +    talos_disk: fake +bootstrap_dns_servers: ["1.1.1.1"] +bootstrap_search_domain: "fake" +bootstrap_pod_network: 10.69.0.0/16 +bootstrap_service_network: 10.96.0.0/16 +bootstrap_controllers_vip: 10.10.10.254 +bootstrap_tls_sans: ["fake"] +bootstrap_sops_age_pubkey: $BOOTSTRAP_AGE_PUBLIC_KEY +bootstrap_bgp: +  enabled: false +bootstrap_github_address: https://github.com/onedr0p/cluster-template +bootstrap_github_branch: main +bootstrap_github_webhook_token: fake +bootstrap_cloudflare: +  enabled: true +  domain: fake +  token: fake +  acme: +    email: fake@example.com +    production: false +  tunnel: +    account_id: fake +    id: fake +    secret: fake +    ingress_vip: 10.10.10.252 +  ingress_vip: 10.10.10.251 +  gateway_vip: 10.10.10.253 diff --git a/.github/workflows/devcontainer.yaml b/.github/workflows/devcontainer.yaml new file mode 100644 index 00000000..00d37c31 --- /dev/null +++ b/.github/workflows/devcontainer.yaml @@ -0,0 +1,57 @@ +--- +# yaml-language-server: 
$schema=https://json.schemastore.org/github-workflow.json +name: "devcontainer" + +on: + workflow_dispatch: + push: + branches: ["main"] + paths: [".devcontainer/ci/**"] + pull_request: + branches: ["main"] + paths: [".devcontainer/ci/**"] + schedule: + - cron: "0 0 * * 1" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + devcontainer: + name: publish + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + platforms: linux/amd64,linux/arm64 + + - if: ${{ github.event_name != 'pull_request' }} + name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push + uses: devcontainers/ci@v0.3 + env: + BUILDX_NO_DEFAULT_ATTESTATIONS: true + with: + imageName: ghcr.io/${{ github.repository }}/devcontainer + # cacheFrom: ghcr.io/${{ github.repository }}/devcontainer + imageTag: base,latest + platform: linux/amd64,linux/arm64 + configFile: .devcontainer/ci/devcontainer.json + push: ${{ github.event_name == 'pull_request' && 'never' || 'always' }} diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml new file mode 100644 index 00000000..441b1e18 --- /dev/null +++ b/.github/workflows/e2e.yaml @@ -0,0 +1,107 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "e2e" + +on: + workflow_dispatch: + pull_request: + branches: ["main"] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + configure: + name: configure + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + config-files: + - k3s-ipv4 + - k3s-ipv6 + - talos 
+ steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Homebrew + id: setup-homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Setup Python + uses: actions/setup-python@v5 + id: setup-python + with: + python-version: "3.11" # minimum supported version + + - name: Cache homebrew packages + if: ${{ github.event_name == 'pull_request' }} + uses: actions/cache@v4 + id: cache-homebrew-packages + with: + key: homebrew-${{ runner.os }}-${{ steps.setup-homebrew.outputs.gems-hash }}-${{ hashFiles('.taskfiles/Workstation/Brewfile') }} + path: /home/linuxbrew/.linuxbrew + + - name: Cache venv + if: ${{ github.event_name == 'pull_request' }} + uses: actions/cache@v4 + with: + key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('requirements.txt', 'requirements.yaml') }} + path: .venv + + - name: Setup Workflow Tools + if: ${{ github.event_name == 'pull_request' && steps.cache-homebrew-packages.outputs.cache-hit != 'true' }} + shell: bash + run: brew install go-task + + - name: Run Workstation Brew tasks + if: ${{ github.event_name == 'pull_request' && steps.cache-homebrew-packages.outputs.cache-hit != 'true' }} + shell: bash + run: task workstation:brew + + - name: Run Workstation venv tasks + shell: bash + run: task workstation:venv + + - name: Run Workstation direnv tasks + shell: bash + run: task workstation:direnv + + - name: Run Sops Age key task + shell: bash + run: task sops:age-keygen + + - name: Run init tasks + shell: bash + run: | + task init + cp ./.github/tests/config-${{ matrix.config-files }}.yaml ./config.yaml + export BOOTSTRAP_AGE_PUBLIC_KEY=$(sed -n 's/# public key: //gp' age.key) + envsubst < ./config.yaml | sponge ./config.yaml + + - name: Run configure task + shell: bash + run: task configure --yes + + - name: Run Talos tasks + if: ${{ startsWith(matrix.config-files, 'talos') }} + shell: bash + run: | + task talos:bootstrap-gensecret + task talos:bootstrap-genconfig + + - name: Run 
Ansible tasks + if: ${{ startsWith(matrix.config-files, 'k3s') }} + shell: bash + run: | + task ansible:deps force=false + task ansible:lint + task ansible:list + + - name: Run repo clean and reset tasks + shell: bash + run: | + task repository:clean + task repository:reset --yes diff --git a/.github/workflows/flux-diff.yaml b/.github/workflows/flux-diff.yaml new file mode 100644 index 00000000..5e942518 --- /dev/null +++ b/.github/workflows/flux-diff.yaml @@ -0,0 +1,68 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Flux Diff" + +on: + pull_request: + branches: ["main"] + paths: ["kubernetes/**"] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + flux-diff: + name: Flux Diff + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + strategy: + matrix: + paths: ["kubernetes"] + resources: ["helmrelease", "kustomization"] + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + path: pull + + - name: Checkout Default Branch + uses: actions/checkout@v4 + with: + ref: "${{ github.event.repository.default_branch }}" + path: default + + - name: Diff Resources + uses: docker://ghcr.io/allenporter/flux-local:main + with: + args: >- + diff ${{ matrix.resources }} + --unified 6 + --path /github/workspace/pull/${{ matrix.paths }} + --path-orig /github/workspace/default/${{ matrix.paths }} + --strip-attrs "helm.sh/chart,checksum/config,app.kubernetes.io/version,chart" + --limit-bytes 10000 + --all-namespaces + --sources "home-kubernetes" + --output-file diff.patch + + - name: Generate Diff + id: diff + run: | + cat diff.patch + echo "diff<> $GITHUB_OUTPUT + cat diff.patch >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + - if: ${{ steps.diff.outputs.diff != '' }} + name: Add comment + uses: mshick/add-pr-comment@v2 + with: + message-id: "${{ github.event.pull_request.number }}/${{ matrix.paths }}/${{ 
matrix.resources }}" + message-failure: Diff was not successful + message: | + ```diff + ${{ steps.diff.outputs.diff }} + ``` diff --git a/.github/workflows/kubeconform.yaml b/.github/workflows/kubeconform.yaml new file mode 100644 index 00000000..58a63cc1 --- /dev/null +++ b/.github/workflows/kubeconform.yaml @@ -0,0 +1,29 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Kubeconform" + +on: + pull_request: + branches: ["main"] + paths: ["kubernetes/**"] + +env: + KUBERNETES_DIR: ./kubernetes + +jobs: + kubeconform: + name: Kubeconform + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Setup Workflow Tools + run: brew install fluxcd/tap/flux kubeconform kustomize + + - name: Run kubeconform + shell: bash + run: bash ./scripts/kubeconform.sh ${{ env.KUBERNETES_DIR }} diff --git a/.github/workflows/label-sync.yaml b/.github/workflows/label-sync.yaml new file mode 100644 index 00000000..90804e0a --- /dev/null +++ b/.github/workflows/label-sync.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Label Sync" + +on: + workflow_dispatch: + push: + branches: ["main"] + paths: [".github/labels.yaml"] + +jobs: + label-sync: + name: Label Sync + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Sync Labels + uses: EndBug/label-sync@v2 + with: + config-file: .github/labels.yaml + delete-other-labels: true diff --git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml new file mode 100644 index 00000000..d658c1d9 --- /dev/null +++ b/.github/workflows/labeler.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Labeler" + +on: + workflow_dispatch: + pull_request_target: + branches: ["main"] + +jobs: + labeler: + name: Labeler + 
runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + steps: + - name: Labeler + uses: actions/labeler@v5 + with: + configuration-path: .github/labeler.yaml diff --git a/.github/workflows/lychee.yaml b/.github/workflows/lychee.yaml new file mode 100644 index 00000000..b2e41431 --- /dev/null +++ b/.github/workflows/lychee.yaml @@ -0,0 +1,56 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Lychee" + +on: + workflow_dispatch: + push: + branches: ["main"] + paths: [".github/workflows/lychee.yaml"] + schedule: + - cron: "0 0 * * *" + +env: + LYCHEE_OUTPUT: lychee/out.md + WORKFLOW_ISSUE_TITLE: "Link Checker Dashboard 🔗" + +jobs: + lychee: + name: Lychee + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Scan for broken links + uses: lycheeverse/lychee-action@v1 + id: lychee + with: + token: "${{ secrets.GITHUB_TOKEN }}" + args: --verbose --no-progress --exclude-mail './**/*.md' + output: "${{ env.LYCHEE_OUTPUT }}" + debug: true + + - name: Find Link Checker Issue + id: find-issue + shell: bash + env: + GH_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + run: | + issue_number=$( \ + gh issue list \ + --search "in:title ${{ env.WORKFLOW_ISSUE_TITLE }}" \ + --state open \ + --json number \ + | jq --raw-output '.[0].number' \ + ) + echo "issue-number=${issue_number}" >> $GITHUB_OUTPUT + echo "${issue_number}" + + - name: Create or Update Issue + uses: peter-evans/create-issue-from-file@v5 + with: + token: "${{ secrets.GITHUB_TOKEN }}" + title: "${{ env.WORKFLOW_ISSUE_TITLE }}" + issue-number: "${{ steps.find-issue.outputs.issue-number || '' }}" + content-filepath: "${{ env.LYCHEE_OUTPUT }}" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 00000000..fb943f8f --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json 
+name: "Release" + +on: + workflow_dispatch: + schedule: + - cron: "0 0 1 * *" + +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Create Release + shell: bash + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + run: | + # Retrieve previous release tag + previous_tag="$(gh release list --limit 1 | awk '{ print $1 }')" + previous_major="${previous_tag%%\.*}" + previous_minor="${previous_tag#*.}" + previous_minor="${previous_minor%.*}" + previous_patch="${previous_tag##*.}" + # Determine next release tag + next_major_minor="$(date +'%Y').$(date +'%-m')" + if [[ "${previous_major}.${previous_minor}" == "${next_major_minor}" ]]; then + echo "Month release already exists for year, incrementing patch number by 1" + next_patch="$((previous_patch + 1))" + else + echo "Month release does not exist for year, setting patch number to 0" + next_patch="0" + fi + # Create release + release_tag="${next_major_minor}.${next_patch}" + gh release create "${release_tag}" \ + --repo="${GITHUB_REPOSITORY}" \ + --title="${release_tag}" \ + --generate-notes diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..538c6483 --- /dev/null +++ b/.gitignore @@ -0,0 +1,25 @@ +# Trash +.DS_Store +Thumbs.db +# k8s +kubeconfig +.decrypted~*.yaml +.config.env +*.agekey +*.pub +*.key +# Private +.private +.bin +# Ansible +.venv* +# Taskfile +.task +# Brew +Brewfile.lock.json +# intellij +.idea +# wiki +wiki +# Bootstrap +/config.yaml diff --git a/.lycheeignore b/.lycheeignore new file mode 100644 index 00000000..8cbc880a --- /dev/null +++ b/.lycheeignore @@ -0,0 +1,2 @@ +https://dash.cloudflare.com/profile/api-tokens +https://www.mend.io/free-developer-tools/renovate/ diff --git a/.taskfiles/Ansible/Taskfile.yaml b/.taskfiles/Ansible/Taskfile.yaml new file mode 100644 index 00000000..02322eaf --- /dev/null +++ b/.taskfiles/Ansible/Taskfile.yaml @@ -0,0 +1,88 @@ +--- +# yaml-language-server: 
$schema=https://taskfile.dev/schema.json +version: "3" + +vars: + ANSIBLE_LINT_FILE: "{{.ANSIBLE_DIR}}/.ansible-lint" + ANSIBLE_INVENTORY_FILE: "{{.ANSIBLE_DIR}}/inventory/hosts.yaml" + ANSIBLE_REQUIREMENTS_FILE: "{{.ANSIBLE_DIR}}/requirements.yaml" + ANSIBLE_PIP_REQUIREMENTS_FILE: "{{.ANSIBLE_DIR}}/requirements.txt" + +env: + ANSIBLE_COLLECTIONS_PATH: "{{.VIRTUAL_ENV}}/galaxy" + ANSIBLE_ROLES_PATH: "{{.VIRTUAL_ENV}}/galaxy/ansible_roles" + ANSIBLE_VARS_ENABLED: "host_group_vars" + ANSIBLE_LOCALHOST_WARNING: "False" + ANSIBLE_INVENTORY_UNPARSED_WARNING: "False" + +tasks: + + deps: + desc: Set up Ansible dependencies + deps: [":workstation:venv"] + cmds: + - '{{.VIRTUAL_ENV}}/bin/python3 -m pip install --upgrade --requirement "{{.ANSIBLE_PIP_REQUIREMENTS_FILE}}"' + - '{{.VIRTUAL_ENV}}/bin/ansible-galaxy install --role-file "{{.ANSIBLE_REQUIREMENTS_FILE}}" {{if eq .force "true"}}--force{{end}}' + preconditions: + - { msg: "Missing Ansible requirements file", sh: "test -f {{.ANSIBLE_REQUIREMENTS_FILE}}" } + - { msg: "Missing Pip requirements file", sh: "test -f {{.ANSIBLE_PIP_REQUIREMENTS_FILE}}" } + sources: + - "{{.ANSIBLE_REQUIREMENTS_FILE}}" + - "{{.ANSIBLE_PIP_REQUIREMENTS_FILE}}" + generates: + - "{{.VIRTUAL_ENV}}/bin/ansible" + - "{{.VIRTUAL_ENV}}/bin/ansible-galaxy" + vars: + force: '{{.force | default "true"}}' + + run: + desc: Run an Ansible playbook for configuring a cluster + summary: | + Args: + playbook: Playbook to run (required) + prompt: Run Ansible playbook '{{.playbook}}'... continue? 
+ deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible-playbook --inventory {{.ANSIBLE_INVENTORY_FILE}} {{.ANSIBLE_DIR}}/playbooks/{{.playbook}}.yaml {{.CLI_ARGS}}" + requires: + vars: ["playbook"] + preconditions: + - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } + + poweroff: + desc: Shutdown all the k8s nodes + deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} -a '/usr/bin/systemctl poweroff' --become" + preconditions: + - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } + + list: + desc: List all the hosts + deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} --list-hosts" + preconditions: + - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } + + ping: + desc: Ping all the hosts + deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} --one-line -m 'ping'" + preconditions: + - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } + + uptime: + desc: Uptime of all the hosts + deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible kubernetes --inventory {{.ANSIBLE_INVENTORY_FILE}} --one-line -a 'uptime'" + preconditions: + - { msg: "Missing Ansible inventory file", sh: "test -f {{.ANSIBLE_INVENTORY_FILE}}" } + + lint: + desc: Lint Ansible + deps: ["deps"] + cmd: "{{.VIRTUAL_ENV}}/bin/ansible-lint --config-file {{.ANSIBLE_LINT_FILE}} {{.ANSIBLE_DIR}}/**/*.yaml" + preconditions: + - { msg: "Missing Ansible lint file", sh: "test -f {{.ANSIBLE_LINT_FILE}}" } + + .reset: + internal: true + cmd: rm -rf {{.ANSIBLE_DIR}} diff --git a/.taskfiles/Flux/Taskfile.yaml b/.taskfiles/Flux/Taskfile.yaml new file mode 100644 index 00000000..62815435 --- /dev/null +++ b/.taskfiles/Flux/Taskfile.yaml @@ -0,0 +1,68 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: 
"3" + +vars: + # renovate: datasource=github-releases depName=prometheus-operator/prometheus-operator + PROMETHEUS_OPERATOR_VERSION: v0.71.2 + CLUSTER_SECRET_SOPS_FILE: "{{.KUBERNETES_DIR}}/flux/vars/cluster-secrets.sops.yaml" + CLUSTER_SETTINGS_FILE: "{{.KUBERNETES_DIR}}/flux/vars/cluster-settings.yaml" + GITHUB_DEPLOY_KEY_FILE: "{{.KUBERNETES_DIR}}/bootstrap/flux/github-deploy-key.sops.yaml" + +tasks: + + bootstrap: + desc: Bootstrap Flux into a Kubernetes cluster + cmds: + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/{{.PROMETHEUS_OPERATOR_VERSION}}/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --kustomize {{.KUBERNETES_DIR}}/bootstrap/flux + - cat {{.AGE_FILE}} | kubectl -n flux-system create secret generic sops-age --from-file=age.agekey=/dev/stdin + - sops --decrypt {{.CLUSTER_SECRET_SOPS_FILE}} | kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename - + - kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename {{.CLUSTER_SETTINGS_FILE}} + - kubectl apply --kubeconfig 
{{.KUBECONFIG_FILE}} --server-side --kustomize {{.KUBERNETES_DIR}}/flux/config + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - { msg: "Missing Sops Age key file", sh: "test -f {{.AGE_FILE}}" } + + apply: + desc: Apply a Flux Kustomization resource for a cluster + summary: | + Args: + path: Path under apps containing the Flux Kustomization resource (ks.yaml) (required) + ns: Namespace the Flux Kustomization exists in (default: flux-system) + cmd: | + flux --kubeconfig {{.KUBECONFIG_FILE}} build ks $(basename {{.path}}) \ + --namespace {{.ns}} \ + --kustomization-file {{.KUBERNETES_DIR}}/apps/{{.path}}/ks.yaml \ + --path {{.KUBERNETES_DIR}}/apps/{{.path}} \ + {{- if contains "not found" .ks }}--dry-run \{{ end }} + | \ + kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side \ + --field-manager=kustomize-controller -f - + requires: + vars: ["path"] + vars: + ns: '{{.ns | default "flux-system"}}' + ks: + sh: flux --kubeconfig {{.KUBECONFIG_FILE}} --namespace {{.ns}} get kustomizations $(basename {{.path}}) 2>&1 + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - { msg: "Missing Flux Kustomization for app {{.path}}", sh: "test -f {{.KUBERNETES_DIR}}/apps/{{.path}}/ks.yaml" } + + reconcile: + desc: Force update Flux to pull in changes from your Git repository + cmd: flux --kubeconfig {{.KUBECONFIG_FILE}} reconcile --namespace flux-system kustomization cluster --with-source + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + + github-deploy-key: + cmds: + - kubectl create namespace flux-system --dry-run=client -o yaml | kubectl --kubeconfig {{.KUBECONFIG_FILE}} apply --filename - + - sops --decrypt {{.GITHUB_DEPLOY_KEY_FILE}} | kubectl apply --kubeconfig {{.KUBECONFIG_FILE}} --server-side --filename - + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + - { msg: "Missing Sops Age key file", sh: "test -f 
{{.AGE_FILE}}" } + - { msg: "Missing Github deploy key file", sh: "test -f {{.GITHUB_DEPLOY_KEY_FILE}}" } diff --git a/.taskfiles/Kubernetes/Taskfile.yaml b/.taskfiles/Kubernetes/Taskfile.yaml new file mode 100644 index 00000000..e4f52e0c --- /dev/null +++ b/.taskfiles/Kubernetes/Taskfile.yaml @@ -0,0 +1,35 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + KUBECONFORM_SCRIPT: "{{.SCRIPTS_DIR}}/kubeconform.sh" + +tasks: + + resources: + desc: Gather common resources in your cluster, useful when asking for support + cmds: + - for: { var: resource } + cmd: kubectl get {{.ITEM}} {{.CLI_ARGS | default "-A"}} + vars: + resource: >- + nodes + gitrepositories + kustomizations + helmrepositories + helmreleases + certificates + certificaterequests + ingresses + pods + + kubeconform: + desc: Validate Kubernetes manifests with kubeconform + cmd: bash {{.KUBECONFORM_SCRIPT}} {{.KUBERNETES_DIR}} + preconditions: + - { msg: "Missing kubeconform script", sh: "test -f {{.KUBECONFORM_SCRIPT}}" } + + .reset: + internal: true + cmd: rm -rf {{.KUBERNETES_DIR}} diff --git a/.taskfiles/Repository/Taskfile.yaml b/.taskfiles/Repository/Taskfile.yaml new file mode 100644 index 00000000..a0572db8 --- /dev/null +++ b/.taskfiles/Repository/Taskfile.yaml @@ -0,0 +1,39 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +tasks: + + clean: + desc: Clean files and directories no longer needed after cluster bootstrap + cmds: + # Clean up CI + - rm -rf {{.ROOT_DIR}}/.github/tests + - rm -rf {{.ROOT_DIR}}/.github/workflows/e2e.yaml + # Move bootstrap directory to gitignored directory + - mv {{.BOOTSTRAP_DIR}} {{.PRIVATE_DIR}}/bootstrap-{{now | date "150405"}} + - mv {{.MAKEJINJA_CONFIG_FILE}} {{.PRIVATE_DIR}}/makejinja-{{now | date "150405"}}.toml + # Update renovate.json5 + - sed -i {{if eq OS "darwin"}}''{{end}} 's/(..\.j2)\?//g' {{.ROOT_DIR}}/.github/renovate.json5 + preconditions: + - { msg: "Missing bootstrap 
directory", sh: "test -d {{.BOOTSTRAP_DIR}}" } + - { msg: "Missing private directory", sh: "test -d {{.PRIVATE_DIR}}" } + - { msg: "Missing Renovate config file", sh: "test -f {{.ROOT_DIR}}/.github/renovate.json5" } + + reset: + desc: Reset templated configuration files + prompt: Reset templated configuration files... continue? + cmds: + - task: :ansible:.reset + - task: :kubernetes:.reset + - task: :sops:.reset + - task: :talos:.reset + + force-reset: + desc: Reset repo back to HEAD + prompt: Reset repo back to HEAD... continue? + cmds: + - task: reset + - git reset --hard HEAD + - git clean -f -d + - git pull origin main diff --git a/.taskfiles/Sops/Taskfile.yaml b/.taskfiles/Sops/Taskfile.yaml new file mode 100644 index 00000000..37395765 --- /dev/null +++ b/.taskfiles/Sops/Taskfile.yaml @@ -0,0 +1,41 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + SOPS_CONFIG_FILE: "{{.ROOT_DIR}}/.sops.yaml" + +tasks: + + age-keygen: + desc: Initialize Age Key for Sops + cmd: age-keygen --output {{.AGE_FILE}} + status: + - test -f "{{.AGE_FILE}}" + + encrypt: + desc: Encrypt all Kubernetes SOPS secrets that are not already encrypted + cmds: + - for: { var: file } + task: .encrypt-file + vars: + file: "{{.ITEM}}" + vars: + file: + sh: | + if [ -d "{{.KUBERNETES_DIR}}" ]; then + find "{{.KUBERNETES_DIR}}" -type f -name "*.sops.*" -exec grep -L "ENC\[AES256_GCM" {} \; + fi + + .encrypt-file: + internal: true + cmd: sops --encrypt --in-place {{.file}} + requires: + vars: ["file"] + preconditions: + - { msg: "Missing Sops config file", sh: "test -f {{.SOPS_CONFIG_FILE}}" } + - { msg: "Missing Sops Age key file", sh: "test -f {{.AGE_FILE}}" } + + .reset: + internal: true + cmd: rm -rf {{.SOPS_CONFIG_FILE}} diff --git a/.taskfiles/Talos/Taskfile.yaml b/.taskfiles/Talos/Taskfile.yaml new file mode 100644 index 00000000..0ffeeec4 --- /dev/null +++ b/.taskfiles/Talos/Taskfile.yaml @@ -0,0 +1,109 @@ +--- +# yaml-language-server: 
$schema=https://taskfile.dev/schema.json +version: "3" + +vars: + TALOS_DIR: "{{.KUBERNETES_DIR}}/bootstrap/talos" + TALHELPER_SECRET_FILE: "{{.TALOS_DIR}}/talhelper.sops.yaml" + TALHELPER_CONFIG_FILE: "{{.TALOS_DIR}}/talconfig.yaml" + +env: + TALOSCONFIG: "{{.TALOS_DIR}}/clusterconfig/talosconfig" + +tasks: + + bootstrap: + desc: Bootstrap the Talos cluster + dir: "{{.TALOS_DIR}}" + cmds: + - task: bootstrap-gensecret + - task: bootstrap-genconfig + - task: bootstrap-apply + - task: bootstrap-install + - task: fetch-kubeconfig + - task: bootstrap-apps + - talosctl health --server=false + + bootstrap-gensecret: + desc: Generate the Talos secrets + dir: "{{.TALOS_DIR}}" + cmds: + - talhelper gensecret > {{.TALHELPER_SECRET_FILE}} + - task: :sops:.encrypt-file + vars: + file: "{{.TALHELPER_SECRET_FILE}}" + preconditions: + - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } + status: + - test -f "{{.TALHELPER_SECRET_FILE}}" + + bootstrap-genconfig: + desc: Generate the Talos configs + dir: "{{.TALOS_DIR}}" + cmd: talhelper genconfig + preconditions: + - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } + + bootstrap-apply: + desc: Apply the Talos config on a node + dir: "{{.TALOS_DIR}}" + cmd: talhelper gencommand apply --extra-flags=--insecure | bash + preconditions: + - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } + + bootstrap-install: + desc: Install the Talos cluster + dir: "{{.TALOS_DIR}}" + cmds: + - echo "Installing Talos... 
ignore the errors and be patient" + - until talhelper gencommand bootstrap | bash; do sleep 10; done + - sleep 10 + preconditions: + - { msg: "Missing talhelper config file", sh: "test -f {{.TALHELPER_CONFIG_FILE}}" } + + bootstrap-apps: + desc: Bootstrap core apps needed for Talos + dir: "{{.TALOS_DIR}}" + cmds: + - until kubectl --kubeconfig {{.KUBECONFIG_FILE}} wait --for=condition=Ready=False nodes --all --timeout=600s; do sleep 10; done + - helmfile --file ./apps/helmfile.yaml apply --skip-diff-on-install --suppress-diff + - until kubectl --kubeconfig {{.KUBECONFIG_FILE}} wait --for=condition=Ready nodes --all --timeout=600s; do sleep 10; done + preconditions: + - { msg: "Missing kubeconfig", sh: "test -f {{.KUBECONFIG_FILE}}" } + + upgrade-talos: + desc: Upgrade talos on a node + cmd: talosctl --nodes {{.node}} upgrade --image {{.image}} --preserve=true --reboot-mode=default + requires: + vars: ["node", "image"] + preconditions: + - { msg: "Node not found", sh: "talosctl --nodes {{.node}} get machineconfig" } + + upgrade-k8s: + desc: Upgrade k8s on a node + cmd: talosctl --nodes {{.node}} upgrade-k8s --to {{.to}} + requires: + vars: ["node", "to"] + preconditions: + - { msg: "Node not found", sh: "talosctl --nodes {{.node}} get machineconfig" } + + fetch-kubeconfig: + desc: Generate talos kubeconfig + dir: "{{.TALOS_DIR}}" + cmd: until talhelper gencommand kubeconfig --extra-flags "--force" | bash; do sleep 10; done + + soft-nuke: + desc: Resets nodes back to maintenance mode so you can re-deploy again straight after + prompt: This will destroy your cluster and reset the nodes back to maintenance mode... continue? + dir: "{{.TALOS_DIR}}" + cmd: talhelper gencommand reset --extra-flags "--reboot --system-labels-to-wipe STATE --system-labels-to-wipe EPHEMERAL --graceful=false --wait=false" | bash + + hard-nuke: + desc: Resets nodes back completely and reboots them + prompt: This will destroy your cluster and reset the nodes... continue? 
+ dir: "{{.TALOS_DIR}}" + cmd: talhelper gencommand reset --extra-flags "--reboot --graceful=false --wait=false" | bash + + .reset: + internal: true + cmd: rm -rf {{.TALOS_DIR}} diff --git a/.taskfiles/Workstation/Archfile b/.taskfiles/Workstation/Archfile new file mode 100644 index 00000000..b1ad3160 --- /dev/null +++ b/.taskfiles/Workstation/Archfile @@ -0,0 +1,17 @@ +age +cloudflared-bin +direnv +flux-bin +go-task +go-yq +helm +helmfile +jq +kubeconform +kubectl-bin +kustomize +moreutils +sops +stern-bin +talhelper-bin +talosctl diff --git a/.taskfiles/Workstation/Brewfile b/.taskfiles/Workstation/Brewfile new file mode 100644 index 00000000..0d31dc67 --- /dev/null +++ b/.taskfiles/Workstation/Brewfile @@ -0,0 +1,20 @@ +tap "fluxcd/tap" +tap "go-task/tap" +tap "siderolabs/talos" +brew "age" +brew "cloudflared" +brew "direnv" +brew "fluxcd/tap/flux" +brew "go-task/tap/go-task" +brew "helm" +brew "helmfile" +brew "jq" +brew "kubeconform" +brew "kubernetes-cli" +brew "kustomize" +brew "moreutils" +brew "sops" +brew "stern" +brew "talhelper" +brew "talosctl" +brew "yq" diff --git a/.taskfiles/Workstation/Taskfile.yaml b/.taskfiles/Workstation/Taskfile.yaml new file mode 100644 index 00000000..09f309f6 --- /dev/null +++ b/.taskfiles/Workstation/Taskfile.yaml @@ -0,0 +1,71 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + ARCHFILE: "{{.ROOT_DIR}}/.taskfiles/Workstation/Archfile" + BREWFILE: "{{.ROOT_DIR}}/.taskfiles/Workstation/Brewfile" + GENERIC_BIN_DIR: "{{.ROOT_DIR}}/.bin" + +tasks: + + direnv: + desc: Run direnv hooks + cmd: direnv allow . 
+ status: + - "[[ $(direnv status --json | jq '.state.foundRC.allowed') == 0 ]]" + - "[[ $(direnv status --json | jq '.state.loadedRC.allowed') == 0 ]]" + + venv: + desc: Set up virtual environment + cmds: + - "{{.PYTHON_BIN}} -m venv {{.VIRTUAL_ENV}}" + - '{{.VIRTUAL_ENV}}/bin/python3 -m pip install --upgrade pip setuptools wheel' + - '{{.VIRTUAL_ENV}}/bin/python3 -m pip install --upgrade --requirement "{{.PIP_REQUIREMENTS_FILE}}"' + sources: + - "{{.PIP_REQUIREMENTS_FILE}}" + generates: + - "{{.VIRTUAL_ENV}}/pyvenv.cfg" + preconditions: + - { msg: "Missing Pip requirements file", sh: "test -f {{.PIP_REQUIREMENTS_FILE}}" } + + brew: + desc: Install workstation dependencies with Brew + cmd: brew bundle --file {{.BREWFILE}} + preconditions: + - { msg: "Missing Homebrew", sh: "command -v brew" } + - { msg: "Missing Brewfile", sh: "test -f {{.BREWFILE}}" } + + arch: + desc: Install Arch workstation dependencies with Paru Or Yay + cmd: "{{.helper}} -Syu --needed --noconfirm --noprogressbar $(cat {{.ARCHFILE}} | xargs)" + vars: + helper: + sh: "command -v yay || command -v paru" + preconditions: + - { msg: "Missing Archfile", sh: "test -f {{.ARCHFILE}}" } + + generic-linux: + desc: Install CLI tools into the projects .bin directory using curl + dir: "{{.GENERIC_BIN_DIR}}" + platforms: ["linux/amd64", "linux/arm64"] + cmds: + - for: + - budimanjojo/talhelper?as=talhelper&type=script + - cloudflare/cloudflared?as=cloudflared&type=script + - FiloSottile/age?as=age&type=script + - fluxcd/flux2?as=flux&type=script + - getsops/sops?as=sops&type=script + - helmfile/helmfile?as=helmfile&type=script + - jqlang/jq?as=jq&type=script + - kubernetes-sigs/kustomize?as=kustomize&type=script + - siderolabs/talos?as=talosctl&type=script + - yannh/kubeconform?as=kubeconform&type=script + - mikefarah/yq?as=yq&type=script + cmd: curl -fsSL "https://i.jpillora.com/{{.ITEM}}" | bash + - cmd: curl -LO "https://dl.k8s.io/release/$(curl -L -s 
https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + platforms: ["linux/amd64"] + - cmd: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" + platforms: ["linux/arm64"] + - cmd: chmod +x kubectl + - cmd: curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | USE_SUDO="false" HELM_INSTALL_DIR="." bash diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 00000000..c8f11210 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,15 @@ +{ + "recommendations": [ + "albert.TabOut", + "britesnow.vscode-toggle-quotes", + "fcrespo82.markdown-table-formatter", + "mikestead.dotenv", + "mitchdenny.ecdc", + "redhat.ansible", + "signageos.signageos-vscode-sops", + "will-stone.in-any-case", + "EditorConfig.editorconfig", + "PKief.material-icon-theme", + "Gruntfuggly.todo-tree" + ] +} diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..8f29572b --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,27 @@ +{ + "ansible.ansible.path": ".venv/bin/ansible", + "ansible.python.activationScript": ".venv/bin/activate", + "ansible.python.interpreterPath": ".venv/bin/python3", + "ansible.validation.enabled": true, + "ansible.validation.lint.arguments": "-c ansible/.ansible-lint", + "ansible.validation.lint.enabled": true, + "ansible.validation.lint.path": ".venv/bin/ansible-lint", + "files.associations": { + "*.json5": "jsonc", + "./ansible/**/*.yaml": "ansible", + "./ansible/**/*.sops.yaml": "yaml", + "./ansible/**/inventory/**/*.yaml": "yaml", + "./kubernetes/**/*.sops.toml": "plaintext" + }, + "sops.defaults.ageKeyFile": "age.key", + "yaml.schemas": { + "ansible": "./ansible/*.yaml", + "Kubernetes": "./kubernetes/*.yaml" + }, + "vs-kubernetes": { + "vs-kubernetes.kubeconfig": "./kubeconfig", + "vs-kubernetes.knownKubeconfigs": [ + "./kubeconfig" + ] + } +} diff --git a/LICENSE b/LICENSE new file mode 100644 index 
00000000..ab784ede --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 onedr0p + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 00000000..8b6dd091 --- /dev/null +++ b/README.md @@ -0,0 +1,516 @@ +# ⛵ Cluster Template + +Welcome to my opinionated and extensible template for deploying a single Kubernetes cluster. The goal of this project is to make it easier for people interested in using Kubernetes to deploy a cluster at home on bare-metal or VMs. + +At a high level this project makes use of [makejinja](https://github.com/mirkolenz/makejinja) to read in a [configuration file](./config.sample.yaml) which will render out pre-made templates that you can then use to customize your Kubernetes experience further. + +## ✨ Features + +The features included will depend on the type of configuration you want to use. There are currently **2 different types** of **configurations** available with this template. + +1. 
**"Flux cluster"** - a Kubernetes distribution of your choosing: [k3s](https://github.com/k3s-io/k3s) or [Talos](https://github.com/siderolabs/talos). Deploys an opinionated implementation of [Flux](https://github.com/fluxcd/flux2) using [GitHub](https://github.com/) as the Git provider and [sops](https://github.com/getsops/sops) to manage secrets. + + - **Required:** Debian 12 or Talos Linux installed on bare metal (or VMs) and some knowledge of [Containers](https://opencontainers.org/) and [YAML](https://yaml.org/). Some knowledge of [Git](https://git-scm.com/) practices & terminology is also required. + - **Components:** [Cilium](https://github.com/cilium/cilium) and [kube-vip](https://github.com/kube-vip/kube-vip) _(k3s)_. [flux](https://github.com/fluxcd/flux2), [cert-manager](https://github.com/cert-manager/cert-manager), [spegel](https://github.com/XenitAB/spegel), [reloader](https://github.com/stakater/Reloader), [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller) _(k3s)_, and [openebs](https://github.com/openebs/openebs). + +3. **"Flux cluster with Cloudflare"** - An addition to "**Flux cluster**" that provides DNS and SSL with [Cloudflare](https://www.cloudflare.com/). [Cloudflare Tunnel](https://www.cloudflare.com/products/tunnel/) is also included to provide external access to certain applications deployed in your cluster. + + - **Required:** A Cloudflare account with a domain managed in your Cloudflare account. + - **Components:** [ingress-nginx](https://github.com/kubernetes/ingress-nginx/), [external-dns](https://github.com/kubernetes-sigs/external-dns) and [cloudflared](https://github.com/cloudflare/cloudflared). + +**Other features include:** + +- A [Renovate](https://www.mend.io/renovate)-ready repository with pull request diffs provided by [flux-local](https://github.com/allenporter/flux-local) +- Integrated [GitHub Actions](https://github.com/features/actions) with helpful workflows. 
+ +## 💻 Machine Preparation + +Hopefully some of this peeked your interests! If you are marching forward, now is a good time to choose whether you will deploy a Kubernetes cluster with [k3s](https://github.com/k3s-io/k3s) or [Talos](https://github.com/siderolabs/talos). + +### System requirements + +> [!NOTE] +> 1. The included behaviour of Talos or k3s is that all nodes are able to run workloads, **including** the controller nodes. **Worker nodes** are therefore **optional**. +> 2. Do you have 3 or more nodes? It is highly recommended to make 3 of them controller nodes for a highly available control plane. +> 3. Running the cluster on Proxmox VE? My thoughts and recommendations about that are documented [here](https://onedr0p.github.io/home-ops/notes/proxmox-considerations.html). + +| Role | Cores | Memory | System Disk | +|---------|----------|---------------|---------------------------| +| Control | 4 _(6*)_ | 8GB _(24GB*)_ | 100GB _(500GB*)_ SSD/NVMe | +| Worker | 4 _(6*)_ | 8GB _(24GB*)_ | 100GB _(500GB*)_ SSD/NVMe | +| _\* recommended_ | + +### Talos + +1. Download the latest stable release of Talos from their [GitHub releases](https://github.com/siderolabs/talos/releases). You will want to grab either `metal-amd64.iso` or `metal-rpi_generic-arm64.raw.xz` depending on your system. + +2. Take note of the OS drive serial numbers you will need them later on. + +3. Flash the iso or raw file to a USB drive and boot to Talos on your nodes with it. + +4. Continue on to 🚀 [**Getting Started**](#-getting-started) + +### k3s (AMD64) + +1. Download the latest stable release of Debian from [here](https://cdimage.debian.org/debian-cd/current/amd64/iso-dvd), then follow [this guide](https://www.linuxtechi.com/how-to-install-debian-12-step-by-step) to get it installed. Deviations from the guide: + + ```txt + Choose "Guided - use entire disk" + Choose "All files in one partition" + Delete Swap partition + Uncheck all Debian desktop environment options + ``` + +2. 
[Post install] Remove CD/DVD as apt source + + ```sh + su - + sed -i '/deb cdrom/d' /etc/apt/sources.list + apt update + exit + ``` + +3. [Post install] Enable sudo for your non-root user + + ```sh + su - + apt update + apt install -y sudo + usermod -aG sudo ${username} + echo "${username} ALL=(ALL) NOPASSWD:ALL" | tee /etc/sudoers.d/${username} + exit + newgrp sudo + sudo apt update + ``` + +4. [Post install] Add SSH keys (or use `ssh-copy-id` on the client that is connecting) + + 📍 _First make sure your ssh keys are up-to-date and added to your github account as [instructed](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account)._ + + ```sh + mkdir -m 700 ~/.ssh + sudo apt install -y curl + curl https://github.com/${github_username}.keys > ~/.ssh/authorized_keys + chmod 600 ~/.ssh/authorized_keys + ``` + +### k3s (RasPi4) + +
+Click here to read about using a RasPi4 + + +> [!NOTE] +> 1. It is recommended to have an 8GB RasPi model. Most important is to **boot from an external SSD/NVMe** rather than an SD card. This is [supported natively](https://www.raspberrypi.com/documentation/computers/raspberry-pi.html), however if you have an early model you may need to [update the bootloader](https://www.tomshardware.com/how-to/boot-raspberry-pi-4-usb) first. +> 2. Check the [power requirements](https://www.raspberrypi.com/documentation/computers/raspberry-pi.html#power-supply) if using a PoE Hat and a SSD/NVMe dongle. + +1. Download the latest stable release of Debian from [here](https://raspi.debian.net/tested-images). _**Do not** use Raspbian or DietPi or any other flavor Linux OS._ + +2. Flash the image onto an SSD/NVMe drive. + +3. Re-mount the drive to your workstation and then do the following (per the [official documentation](https://raspi.debian.net/defaults-and-settings)): + + ```txt + Open 'sysconf.txt' in a text editor and save it upon updating the information below + - Change 'root_authorized_key' to your desired public SSH key + - Change 'root_pw' to your desired root password + - Change 'hostname' to your desired hostname + ``` + +4. Connect SSD/NVMe drive to the Raspberry Pi 4 and power it on. + +5. [Post install] SSH into the device with the `root` user and then create a normal user account with `adduser ${username}` + +6. [Post install] Follow steps 3 and 4 from [k3s (AMD64)](##k3s-amd64). + +7. [Post install] Install `python3` which is needed by Ansible. + + ```sh + sudo apt install -y python3 + ``` + +8. Continue on to 🚀 [**Getting Started**](#-getting-started) + +
+ +## 🚀 Getting Started + +Once you have installed Talos or Debian on your nodes, there are six stages to getting a Flux-managed cluster up and running. + +> [!NOTE] +> For all stages below the commands **MUST** be run on your personal workstation within your repository directory + +### 🎉 Stage 1: Create a Git repository + +1. Create a new **public** repository by clicking the big green "Use this template" button at the top of this page. + +2. Clone **your new repo** to your local workstation and `cd` into it. + +3. Continue on to 🌱 [**Stage 2**](#-stage-2-setup-your-local-workstation-environment) + +### 🌱 Stage 2: Setup your local workstation + +You have two different options for setting up your local workstation. + +- First option is using a `devcontainer` which requires you to have Docker and VSCode installed. This method is the fastest to get going because all the required CLI tools are provided for you in my [devcontainer](https://github.com/onedr0p/cluster-template/pkgs/container/cluster-template%2Fdevcontainer) image. +- The second option is setting up the CLI tools directly on your workstation. + +#### Devcontainer method + +1. Start Docker and open your repository in VSCode. There will be a pop-up asking you to use the `devcontainer`, click the button to start using it. + +2. Continue on to 🔧 [**Stage 3**](#-stage-3-bootstrap-configuration) + +#### Non-devcontainer method + +1. Install the most recent version of [task](https://taskfile.dev/), see the [installation docs](https://taskfile.dev/installation/) for other supported platforms. + + ```sh + # Homebrew + brew install go-task + # or, Arch + pacman -S --noconfirm go-task && ln -sf /usr/bin/go-task /usr/local/bin/task + ``` + +2. Install the most recent version of [direnv](https://direnv.net/), see the [installation docs](https://direnv.net/docs/installation.html) for other supported platforms. 
+ + ```sh + # Homebrew + brew install direnv + # or, Arch + pacman -S --noconfirm direnv + ``` + + 📍 _After `direnv` is installed be sure to **[hook it into your preferred shell](https://direnv.net/docs/hook.html)** and then run `task workstation:direnv`_ + +3. Install the additional **required** CLI tools + + 📍 _**Not using Homebrew or ArchLinux?** Try using the generic Linux task below, if that fails check out the [Brewfile](.taskfiles/Workstation/Brewfile)/[Archfile](.taskfiles/Workstation/Archfile) for what CLI tools are needed and install them._ + + ```sh + # Homebrew + task workstation:brew + # or, Arch with yay/paru + task workstation:arch + # or, Generic Linux (YMMV, this pulls binaries into ./bin) + task workstation:generic-linux + ``` + +4. Setup a Python virtual environment by running the following task command. + + 📍 _This command requires Python 3.11+ to be installed._ + + ```sh + task workstation:venv + ``` + +5. Continue on to 🔧 [**Stage 3**](#-stage-3-bootstrap-configuration) + +### 🔧 Stage 3: Bootstrap configuration + +> [!NOTE] +> The [config.sample.yaml](./config.sample.yaml) file contains config that is **vital** to the bootstrap process. + +1. Generate the `config.yaml` from the [config.sample.yaml](./config.sample.yaml) configuration file. + + ```sh + task init + ``` + +2. Fill out the `config.yaml` configuration file using the comments in that file as a guide. + +3. Run the following command which will generate all the files needed to continue. + + ```sh + task configure + ``` + +4. Push your changes to git + + 📍 _**Verify** all the `./kubernetes/**/*.sops.*` files are **encrypted** with SOPS_ + + ```sh + git add -A + git commit -m "Initial commit :rocket:" + git push + ``` + +5. 
Continue on to ⚡ [**Stage 4**](#-stage-4-prepare-your-nodes-for-kubernetes) + +### ⚡ Stage 4: Prepare your nodes for Kubernetes + +> [!NOTE] +> For **Talos** skip ahead to ⛵ [**Stage 5**](#-stage-5-install-kubernetes) + +#### k3s + +📍 _Here we will be running an Ansible playbook to prepare your nodes for running a Kubernetes cluster._ + +1. Ensure you are able to SSH into your nodes from your workstation using a private SSH key **without a passphrase** (for example using a SSH agent). This lets Ansible interact with your nodes. + +3. Install the Ansible dependencies + + ```sh + task ansible:deps + ``` + +4. Verify Ansible can view your config and ping your nodes + + ```sh + task ansible:list + task ansible:ping + ``` + +5. Run the Ansible prepare playbook (nodes wil reboot when done) + + ```sh + task ansible:run playbook=cluster-prepare + ``` + +6. Continue on to ⛵ [**Stage 5**](#-stage-5-install-kubernetes) + +### ⛵ Stage 5: Install Kubernetes + +#### Talos + +1. Deploy your cluster and bootstrap it. This generates secrets, generates the config files for your nodes and applies them. It bootstraps the cluster afterwards, fetches the kubeconfig file and installs Cilium and kubelet-csr-approver. It finishes with some health checks. + + ```sh + task talos:bootstrap + ``` + +#### k3s + +1. Install Kubernetes depending on the distribution you chose + + ```sh + task ansible:run playbook=cluster-installation + ``` + +#### Cluster validation + +1. The `kubeconfig` for interacting with your cluster should have been created in the root of your repository. + +2. Verify the nodes are online + + 📍 _If this command **fails** you likely haven't configured `direnv` as mentioned previously in the guide._ + + ```sh + kubectl get nodes -o wide + # NAME STATUS ROLES AGE VERSION + # k8s-0 Ready control-plane,etcd,master 1h v1.29.1 + # k8s-1 Ready worker 1h v1.29.1 + ``` + +3. 
Continue on to 🔹 [**Stage 6**](#-stage-6-install-flux-in-your-cluster) + +### 🔹 Stage 6: Install Flux in your cluster + +1. Verify Flux can be installed + + ```sh + flux check --pre + # ► checking prerequisites + # ✔ kubectl 1.27.3 >=1.18.0-0 + # ✔ Kubernetes 1.27.3+k3s1 >=1.16.0-0 + # ✔ prerequisites checks passed + ``` + +2. Install Flux and sync the cluster to the Git repository + + 📍 _Run `task flux:github-deploy-key` first if using a private repository._ + + ```sh + task flux:bootstrap + # namespace/flux-system configured + # customresourcedefinition.apiextensions.k8s.io/alerts.notification.toolkit.fluxcd.io created + # ... + ``` + +1. Verify Flux components are running in the cluster + + ```sh + kubectl -n flux-system get pods -o wide + # NAME READY STATUS RESTARTS AGE + # helm-controller-5bbd94c75-89sb4 1/1 Running 0 1h + # kustomize-controller-7b67b6b77d-nqc67 1/1 Running 0 1h + # notification-controller-7c46575844-k4bvr 1/1 Running 0 1h + # source-controller-7d6875bcb4-zqw9f 1/1 Running 0 1h + ``` + +### 🎤 Verification Steps + +_Mic check, 1, 2_ - In a few moments applications should be lighting up like Christmas in July 🎄 + +1. Output all the common resources in your cluster. + + 📍 _Feel free to use the provided [kubernetes tasks](.taskfiles/Kubernetes/Taskfile.yaml) for validation of cluster resources or continue to get familiar with the `kubectl` and `flux` CLI tools._ + + ```sh + task kubernetes:resources + ``` + +2. ⚠️ It might take `cert-manager` awhile to generate certificates, this is normal so be patient. + +3. 🏆 **Congratulations** if all goes smooth you will have a Kubernetes cluster managed by Flux and your Git repository is driving the state of your cluster. + +4. 🧠 Now it's time to pause and go get some motel motor oil ☕ and admire you made it this far! + +## 📣 Flux w/ Cloudflare post installation + +#### 🌐 Public DNS + +The `external-dns` application created in the `networking` namespace will handle creating public DNS records. 
By default, `echo-server-external` and the `flux-webhook` are the only subdomains reachable from the public internet. In order to make additional applications public you must set set the correct ingress class name and ingress annotations like in the HelmRelease for `echo-server`. + +#### 🏠 Home DNS + +`k8s_gateway` will provide DNS resolution to external Kubernetes resources (i.e. points of entry to the cluster) from any device that uses your home DNS server. For this to work, your home DNS server must be configured to forward DNS queries for `${bootstrap_cloudflare.domain}` to `${bootstrap_cloudflare.gateway_vip}` instead of the upstream DNS server(s) it normally uses. This is a form of **split DNS** (aka split-horizon DNS / conditional forwarding). + +> [!TIP] +> Below is how to configure a Pi-hole for split DNS. Other platforms should be similar. +> 1. Apply this file on the Pihole server while substituting the variables +> ```sh +> # /etc/dnsmasq.d/99-k8s-gateway-forward.conf +> server=/${bootstrap_cloudflare.domain}/${bootstrap_cloudflare.gateway_vip} +> ``` +> 2. Restart dnsmasq on the server. +> 3. Query an internal-only subdomain from your workstation (any `internal` class ingresses): `dig @${home-dns-server-ip} echo-server-internal.${bootstrap_cloudflare.domain}`. It should resolve to `${bootstrap_cloudflare.ingress_vip}`. + +If you're having trouble with DNS be sure to check out these two GitHub discussions: [Internal DNS](https://github.com/onedr0p/cluster-template/discussions/719) and [Pod DNS resolution broken](https://github.com/onedr0p/cluster-template/discussions/635). + +... Nothing working? That is expected, this is DNS after all! + +#### 📜 Certificates + +By default this template will deploy a wildcard certificate using the Let's Encrypt **staging environment**, which prevents you from getting rate-limited by the Let's Encrypt production servers if your cluster doesn't deploy properly (for example due to a misconfiguration). 
Once you are sure you will keep the cluster up for more than a few hours be sure to switch to the production servers as outlined in `config.yaml`. + +📍 _You will need a production certificate to reach internet-exposed applications through `cloudflared`._ + +#### 🪝 Github Webhook + +By default Flux will periodically check your git repository for changes. In order to have Flux reconcile on `git push` you must configure Github to send `push` events to Flux. + +> [!NOTE] +> This will only work after you have switched over certificates to the Let's Encrypt Production servers. + +1. Obtain the webhook path + + 📍 _Hook id and path should look like `/hook/12ebd1e363c641dc3c2e430ecf3cee2b3c7a5ac9e1234506f6f5f3ce1230e123`_ + + ```sh + kubectl -n flux-system get receiver github-receiver -o jsonpath='{.status.webhookPath}' + ``` + +2. Piece together the full URL with the webhook path appended + + ```text + https://flux-webhook.${bootstrap_cloudflare.domain}/hook/12ebd1e363c641dc3c2e430ecf3cee2b3c7a5ac9e1234506f6f5f3ce1230e123 + ``` + +3. Navigate to the settings of your repository on Github, under "Settings/Webhooks" press the "Add webhook" button. Fill in the webhook url and your `bootstrap_github_webhook_token` secret and save. + +## 💥 Nuke + +There might be a situation where you want to destroy your Kubernetes cluster. This will completely clean the OS of all traces of the Kubernetes distribution you chose and then reboot the nodes. + +```sh +# k3s: Remove all traces of k3s from the nodes +task ansible:run playbook=cluster-nuke +# Talos: Reset your nodes back to maintenance mode and reboot +task talos:soft-nuke +# Talos: Completely format the Talos installation and reboot +task talos:hard-nuke +``` + +## 🤖 Renovate + +[Renovate](https://www.mend.io/renovate) is a tool that automates dependency management. It is designed to scan your repository around the clock and open PRs for out-of-date dependencies it finds. 
Common dependencies it can discover are Helm charts, container images, GitHub Actions, Ansible roles... even Flux itself! Merging a PR will cause Flux to apply the update to your cluster. + +To enable Renovate, click the 'Configure' button over at their [Github app page](https://github.com/apps/renovate) and select your repository. Renovate creates a "Dependency Dashboard" as an issue in your repository, giving an overview of the status of all updates. The dashboard has interactive checkboxes that let you do things like advance scheduling or reattempt update PRs you closed without merging. + +The base Renovate configuration in your repository can be viewed at [.github/renovate.json5](./.github/renovate.json5). By default it is scheduled to be active with PRs every weekend, but you can [change the schedule to anything you want](https://docs.renovatebot.com/presets-schedule), or remove it if you want Renovate to open PRs right away. + +## 🐛 Debugging + +Below is a general guide on trying to debug an issue with a resource or application. For example, if a workload/resource is not showing up or a pod has started but in a `CrashLoopBackOff` or `Pending` state. + +1. Start by checking all Flux Kustomizations & Git Repository & OCI Repository and verify they are healthy. + + ```sh + flux get sources oci -A + flux get sources git -A + flux get ks -A + ``` + +2. Then check all the Flux Helm Releases and verify they are healthy. + + ```sh + flux get hr -A + ``` + +3. Then check if the pod is present. + + ```sh + kubectl -n get pods -o wide + ``` + +4. Then check the logs of the pod if it's there. + + ```sh + kubectl -n logs -f + # or + stern -n + ``` + +5. If a resource exists try to describe it to see what problems it might have. + + ```sh + kubectl -n describe + ``` + +6. 
Check the namespace events + + ```sh + kubectl -n get events --sort-by='.metadata.creationTimestamp' + ``` + +Resolving problems that you have could take some tweaking of your YAML manifests in order to get things working, other times it could be an external factor like permissions on NFS. If you are unable to figure out your problem see the help section below. + +## 👉 Help + +- Make a post in this repository's Github [Discussions](https://github.com/onedr0p/cluster-template/discussions). +- Start a thread in the `#support` or `#cluster-template` channels in the [Home Operations](https://discord.gg/home-operations) Discord server. + +## ❔ What's next + +The cluster is your oyster (or something like that). Below are some optional considerations you might want to review. + +#### Ship it + +To browse or get ideas on applications people are running, community member [@whazor](https://github.com/whazor) created [Kubesearch](https://kubesearch.dev) as a creative way to search Flux HelmReleases across Github and Gitlab. + +#### Storage + +The included CSI (openebs in local-hostpath mode) is a great start for storage but soon you might find you need more features like replicated block storage, or to connect to a NFS/SMB/iSCSI server. If you need any of those features be sure to check out projects like [rook-ceph](https://github.com/rook/rook), [longhorn](https://github.com/longhorn/longhorn), [openebs](https://github.com/openebs/openebs), [democratic-csi](https://github.com/democratic-csi/democratic-csi), [csi-driver-nfs](https://github.com/kubernetes-csi/csi-driver-nfs), +and [synology-csi](https://github.com/SynologyOpenSource/synology-csi). + +## 🙌 Related Projects + +If this repo is too hot to handle or too cold to hold check out the following projects. 
+ +- [khuedoan/homelab](https://github.com/khuedoan/homelab) - _Modern self-hosting framework, fully automated from empty disk to operating services with a single command._ +- [danmanners/aws-argo-cluster-template](https://github.com/danmanners/aws-argo-cluster-template) - _A community opinionated template for deploying Kubernetes clusters on-prem and in AWS using Pulumi, SOPS, Sealed Secrets, GitHub Actions, Renovate, Cilium and more!_ +- [ricsanfre/pi-cluster](https://github.com/ricsanfre/pi-cluster) - _Pi Kubernetes Cluster. Homelab kubernetes cluster automated with Ansible and ArgoCD_ +- [techno-tim/k3s-ansible](https://github.com/techno-tim/k3s-ansible) - _The easiest way to bootstrap a self-hosted High Availability Kubernetes cluster. A fully automated HA k3s etcd install with kube-vip, MetalLB, and more_ + +## ⭐ Stargazers + +
+ +[![Star History Chart](https://api.star-history.com/svg?repos=onedr0p/cluster-template&type=Date)](https://star-history.com/#onedr0p/cluster-template&Date) + +
+ +## 🤝 Thanks + +Big shout out to all the contributors, sponsors and everyone else who has helped on this project. diff --git a/Taskfile.yaml b/Taskfile.yaml new file mode 100644 index 00000000..c510d73d --- /dev/null +++ b/Taskfile.yaml @@ -0,0 +1,79 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +vars: + # Directories + ANSIBLE_DIR: "{{.ROOT_DIR}}/ansible" + BOOTSTRAP_DIR: "{{.ROOT_DIR}}/bootstrap" + KUBERNETES_DIR: "{{.ROOT_DIR}}/kubernetes" + PRIVATE_DIR: "{{.ROOT_DIR}}/.private" + SCRIPTS_DIR: "{{.ROOT_DIR}}/scripts" + # Files + AGE_FILE: "{{.ROOT_DIR}}/age.key" + BOOTSTRAP_CONFIG_FILE: "{{.ROOT_DIR}}/config.yaml" + KUBECONFIG_FILE: "{{.ROOT_DIR}}/kubeconfig" + MAKEJINJA_CONFIG_FILE: "{{.ROOT_DIR}}/makejinja.toml" + PIP_REQUIREMENTS_FILE: "{{.ROOT_DIR}}/requirements.txt" + # Binaries + PYTHON_BIN: python3 + +env: + KUBECONFIG: "{{.KUBECONFIG_FILE}}" + PYTHONDONTWRITEBYTECODE: "1" + SOPS_AGE_KEY_FILE: "{{.AGE_FILE}}" + VIRTUAL_ENV: "{{.ROOT_DIR}}/.venv" + +includes: + ansible: .taskfiles/Ansible/Taskfile.yaml + kubernetes: + aliases: ["k8s"] + taskfile: .taskfiles/Kubernetes/Taskfile.yaml + flux: .taskfiles/Flux/Taskfile.yaml + repository: + aliases: ["repo"] + taskfile: .taskfiles/Repository/Taskfile.yaml + talos: .taskfiles/Talos/Taskfile.yaml + sops: .taskfiles/Sops/Taskfile.yaml + workstation: .taskfiles/Workstation/Taskfile.yaml + +tasks: + + default: task -l + + init: + desc: Initialize configuration files + cmds: + - mkdir -p {{.PRIVATE_DIR}} + - cp -n {{.BOOTSTRAP_CONFIG_FILE | replace ".yaml" ".sample.yaml"}} {{.BOOTSTRAP_CONFIG_FILE}} + - cmd: echo === Configuration file copied === + silent: true + - cmd: echo Proceed with updating the configuration files... 
+ silent: true + - cmd: echo {{.BOOTSTRAP_CONFIG_FILE}} + silent: true + status: + - test -f "{{.BOOTSTRAP_CONFIG_FILE}}" + + configure: + desc: Configure repository from bootstrap vars + prompt: Any conflicting config in the root kubernetes and ansible directories will be overwritten... continue? + deps: ["workstation:direnv", "workstation:venv", "sops:age-keygen", "init"] + cmds: + - task: .template + - task: sops:encrypt + - task: .validate + + .template: + internal: true + cmd: "{{.VIRTUAL_ENV}}/bin/makejinja" + preconditions: + - { msg: "Missing virtual environment", sh: "test -d {{.VIRTUAL_ENV}}" } + - { msg: "Missing Makejinja config file", sh: "test -f {{.MAKEJINJA_CONFIG_FILE}}" } + - { msg: "Missing Makejinja plugin file", sh: "test -f {{.BOOTSTRAP_DIR}}/scripts/plugin.py" } + - { msg: "Missing bootstrap config file", sh: "test -f {{.BOOTSTRAP_CONFIG_FILE}}" } + + .validate: + internal: true + cmds: + - task: kubernetes:kubeconform diff --git a/bootstrap/overrides/readme.partial.yaml.j2 b/bootstrap/overrides/readme.partial.yaml.j2 new file mode 100644 index 00000000..36dac44d --- /dev/null +++ b/bootstrap/overrides/readme.partial.yaml.j2 @@ -0,0 +1,5 @@ +<% Place user jinja template overrides in this file's directory %> +<% Docs: https://mirkolenz.github.io/makejinja/makejinja.html %> +<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/makejinja.toml %> +<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/input1/not-empty.yaml.jinja %> +<% Example: https://github.com/mirkolenz/makejinja/blob/main/tests/data/input2/not-empty.yaml.jinja %> diff --git a/bootstrap/scripts/plugin.py b/bootstrap/scripts/plugin.py new file mode 100644 index 00000000..57a0682b --- /dev/null +++ b/bootstrap/scripts/plugin.py @@ -0,0 +1,67 @@ +import importlib.util +import sys +from collections.abc import Callable +from pathlib import Path +from typing import Any + +from typing import Any +from netaddr import IPNetwork +from bcrypt import 
hashpw, gensalt + +import makejinja +import validation + +def encrypt(value: str) -> str: + return hashpw(value.encode(), gensalt(rounds=10)).decode("ascii") + + +def nthhost(value: str, query: int) -> str: + value = IPNetwork(value) + try: + nth = int(query) + if value.size > nth: + return str(value[nth]) + except ValueError: + return False + return value + + +def import_filter(file: Path) -> Callable[[dict[str, Any]], bool]: + module_path = file.relative_to(Path.cwd()).with_suffix("") + module_name = str(module_path).replace("/", ".") + spec = importlib.util.spec_from_file_location(module_name, file) + assert spec is not None + module = importlib.util.module_from_spec(spec) + sys.modules[module_name] = module + assert spec.loader is not None + spec.loader.exec_module(module) + return module.main + + +class Plugin(makejinja.plugin.Plugin): + def __init__(self, data: dict[str, Any], config: makejinja.config.Config): + self._data = data + self._config = config + + self._excluded_dirs: set[Path] = set() + for input_path in config.inputs: + for filter_file in input_path.rglob(".mjfilter.py"): + filter_func = import_filter(filter_file) + if filter_func(data) is False: + self._excluded_dirs.add(filter_file.parent) + + validation.validate(data) + + + def filters(self) -> makejinja.plugin.Filters: + return [encrypt, nthhost] + + + def path_filters(self): + return [self._mjfilter_func] + + + def _mjfilter_func(self, path: Path) -> bool: + return not any( + path.is_relative_to(excluded_dir) for excluded_dir in self._excluded_dirs + ) diff --git a/bootstrap/scripts/validation.py b/bootstrap/scripts/validation.py new file mode 100644 index 00000000..ef3e0bb6 --- /dev/null +++ b/bootstrap/scripts/validation.py @@ -0,0 +1,58 @@ +from functools import wraps +from shutil import which +from typing import Callable +from zoneinfo import available_timezones +import netaddr +import sys + +DISTRIBUTIONS = ["k3s", "talos"] +GLOBAL_CLI_TOOLS = ["age", "cloudflared", "flux", "helmfile", 
"sops", "jq", "kubeconform", "kustomize"] +TALOS_CLI_TOOLS = ["talosctl", "talhelper"] + +def required(*keys: str): + def wrapper_outter(func: Callable): + @wraps(func) + def wrapper(data: dict, *args, **kwargs) -> None: + for key in keys: + if data.get(key) is None: + raise ValueError(f"Missing required key {key}") + return func(*[data[key] for key in keys], **kwargs) + return wrapper + return wrapper_outter + + +def validate_python_version() -> None: + required_version = (3, 11, 0) + if sys.version_info < required_version: + raise ValueError(f"Python version is below 3.11. Please upgrade.") + + +@required("bootstrap_distribution") +def validate_cli_tools(distribution: dict, **_) -> None: + if distribution not in DISTRIBUTIONS: + raise ValueError(f"Invalid distribution {distribution}") + for tool in GLOBAL_CLI_TOOLS: + if not which(tool): + raise ValueError(f"Missing required CLI tool {tool}") + for tool in TALOS_CLI_TOOLS if distribution in ["talos"] else []: + if not which(tool): + raise ValueError(f"Missing required CLI tool {tool}") + + +@required("bootstrap_distribution") +def validate_distribution(distribution: dict, **_) -> None: + if distribution not in DISTRIBUTIONS: + raise ValueError(f"Invalid distribution {distribution}") + + +@required("bootstrap_timezone") +def validate_timezone(timezone: str, **_) -> None: + if timezone not in available_timezones(): + raise ValueError(f"Invalid timezone {timezone}") + + +def validate(data: dict) -> None: + validate_python_version() + validate_cli_tools(data) + validate_distribution(data) + validate_timezone(data) diff --git a/bootstrap/templates/.sops.yaml.j2 b/bootstrap/templates/.sops.yaml.j2 new file mode 100644 index 00000000..4cec5261 --- /dev/null +++ b/bootstrap/templates/.sops.yaml.j2 @@ -0,0 +1,20 @@ +--- +creation_rules: + #% if bootstrap_distribution in ["talos"] %# + - # IMPORTANT: This rule MUST be above the others + path_regex: talos/.*\.sops\.ya?ml + key_groups: + - age: + - "#{ 
bootstrap_sops_age_pubkey }#" + #% endif %# + - path_regex: kubernetes/.*\.sops\.ya?ml + encrypted_regex: "^(data|stringData)$" + key_groups: + - age: + - "#{ bootstrap_sops_age_pubkey }#" + #% if bootstrap_distribution in ["k3s"] %# + - path_regex: ansible/.*\.sops\.ya?ml + key_groups: + - age: + - "#{ bootstrap_sops_age_pubkey }#" + #% endif %# diff --git a/bootstrap/templates/ansible/.ansible-lint.j2 b/bootstrap/templates/ansible/.ansible-lint.j2 new file mode 100644 index 00000000..36f6b441 --- /dev/null +++ b/bootstrap/templates/ansible/.ansible-lint.j2 @@ -0,0 +1,9 @@ +skip_list: + - yaml[commas] + - yaml[line-length] + - var-naming +warn_list: + - command-instead-of-shell + - deprecated-command-syntax + - experimental + - no-changed-when diff --git a/bootstrap/templates/ansible/.mjfilter.py b/bootstrap/templates/ansible/.mjfilter.py new file mode 100644 index 00000000..0979f9a6 --- /dev/null +++ b/bootstrap/templates/ansible/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["k3s"] diff --git a/bootstrap/templates/ansible/inventory/group_vars/controllers/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/controllers/main.yaml.j2 new file mode 100644 index 00000000..a5796e35 --- /dev/null +++ b/bootstrap/templates/ansible/inventory/group_vars/controllers/main.yaml.j2 @@ -0,0 +1,40 @@ +--- +k3s_control_node: true +k3s_server: + #% if bootstrap_feature_gates.dual_stack_ipv4_first %# + cluster-cidr: "#{ bootstrap_pod_network.split(',')[0] }#,#{ bootstrap_pod_network.split(',')[1] }#" + service-cidr: "#{ bootstrap_service_network.split(',')[0] }#,#{ bootstrap_service_network.split(',')[1] }#" + #% else %# + cluster-cidr: "#{ bootstrap_pod_network }#" + service-cidr: "#{ bootstrap_service_network }#" + #% endif %# + disable: ["flannel", "local-storage", "metrics-server", "servicelb", "traefik"] + disable-cloud-controller: true + disable-kube-proxy: true + disable-network-policy: true + docker: false + 
embedded-registry: true + etcd-expose-metrics: true + flannel-backend: "none" + kube-apiserver-arg: + - "anonymous-auth=true" + kube-controller-manager-arg: + - "bind-address=0.0.0.0" + kube-scheduler-arg: + - "bind-address=0.0.0.0" + kubelet-arg: + - "image-gc-high-threshold=55" + - "image-gc-low-threshold=50" + #% if bootstrap_feature_gates.dual_stack_ipv4_first %# + node-ip: "{{ ansible_host }},{{ ansible_default_ipv6.address }}" + #% else %# + node-ip: "{{ ansible_host }}" + #% endif %# + pause-image: registry.k8s.io/pause:3.9 + secrets-encryption: true + tls-san: + - "#{ bootstrap_controllers_vip }#" + #% for item in bootstrap_tls_sans %# + - "#{ item }#" + #% endfor %# + write-kubeconfig-mode: "644" diff --git a/bootstrap/templates/ansible/inventory/group_vars/kubernetes/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/kubernetes/main.yaml.j2 new file mode 100644 index 00000000..bf1aeb1b --- /dev/null +++ b/bootstrap/templates/ansible/inventory/group_vars/kubernetes/main.yaml.j2 @@ -0,0 +1,23 @@ +--- +k3s_become: true +k3s_etcd_datastore: true +k3s_install_hard_links: true +k3s_registration_address: "#{ bootstrap_controllers_vip }#" +k3s_registries: + mirrors: + docker.io: + gcr.io: + ghcr.io: + k8s.gcr.io: + lscr.io: + mcr.microsoft.com: + public.ecr.aws: + quay.io: + registry.k8s.io: +# renovate: datasource=github-releases depName=k3s-io/k3s +k3s_release_version: v1.29.1+k3s2 +k3s_server_manifests_templates: + - custom-cilium-helmchart.yaml + - kube-vip-ds.yaml + - kube-vip-rbac.yaml +k3s_use_unsupported_config: true diff --git a/bootstrap/templates/ansible/inventory/group_vars/workers/.mjfilter.py b/bootstrap/templates/ansible/inventory/group_vars/workers/.mjfilter.py new file mode 100644 index 00000000..8fb17eac --- /dev/null +++ b/bootstrap/templates/ansible/inventory/group_vars/workers/.mjfilter.py @@ -0,0 +1,10 @@ +main = lambda data: ( + data.get("bootstrap_distribution", "k3s") in ["k3s"] and + len( + list( + filter( + lambda item: 
"controller" in item and item["controller"] is False, data.get("bootstrap_node_inventory") + ) + ) + ) > 0 +) diff --git a/bootstrap/templates/ansible/inventory/group_vars/workers/main.yaml.j2 b/bootstrap/templates/ansible/inventory/group_vars/workers/main.yaml.j2 new file mode 100644 index 00000000..428852e0 --- /dev/null +++ b/bootstrap/templates/ansible/inventory/group_vars/workers/main.yaml.j2 @@ -0,0 +1,12 @@ +--- +k3s_control_node: false +k3s_agent: + kubelet-arg: + - "image-gc-high-threshold=55" + - "image-gc-low-threshold=50" + #% if bootstrap_feature_gates.dual_stack_ipv4_first %# + node-ip: "{{ ansible_host }},{{ ansible_default_ipv6.address }}" + #% else %# + node-ip: "{{ ansible_host }}" + #% endif %# + pause-image: registry.k8s.io/pause:3.9 diff --git a/bootstrap/templates/ansible/inventory/hosts.yaml.j2 b/bootstrap/templates/ansible/inventory/hosts.yaml.j2 new file mode 100644 index 00000000..8960a23d --- /dev/null +++ b/bootstrap/templates/ansible/inventory/hosts.yaml.j2 @@ -0,0 +1,23 @@ +--- +kubernetes: + children: + controllers: + hosts: + #% for item in bootstrap_node_inventory %# + #% if item.controller %# + "#{ item.name }#": + ansible_user: "#{ item.ssh_user }#" + ansible_host: "#{ item.address }#" + #% endif %# + #% endfor %# + #% if bootstrap_node_inventory | selectattr('controller', 'equalto', False) | list | length %# + workers: + hosts: + #% for item in bootstrap_node_inventory %# + #% if not item.controller %# + "#{ item.name }#": + ansible_user: "#{ item.ssh_user }#" + ansible_host: "#{ item.address }#" + #% endif %# + #% endfor %# + #% endif %# diff --git a/bootstrap/templates/ansible/playbooks/cluster-installation.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-installation.yaml.j2 new file mode 100644 index 00000000..54c2f87a --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/cluster-installation.yaml.j2 @@ -0,0 +1,60 @@ +--- +- name: Cluster Installation + hosts: kubernetes + become: true + gather_facts: true + 
any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... + ansible.builtin.pause: + seconds: 5 + tasks: + - name: Check if cluster is installed + check_mode: false + ansible.builtin.stat: + path: /etc/rancher/k3s/config.yaml + register: k3s_installed + + - name: Ignore manifests templates and urls if the cluster is already installed + when: k3s_installed.stat.exists + ansible.builtin.set_fact: + k3s_server_manifests_templates: [] + k3s_server_manifests_urls: [] + + - name: Prevent downgrades + when: k3s_installed.stat.exists + ansible.builtin.include_tasks: tasks/version-check.yaml + + - name: Install Kubernetes + ansible.builtin.include_role: + name: xanmanning.k3s + public: true + vars: + k3s_state: installed + + - name: Kubeconfig + ansible.builtin.include_tasks: tasks/kubeconfig.yaml + + - name: Wait for custom manifests to rollout + when: + - k3s_primary_control_node + - (k3s_server_manifests_templates | length > 0 + or k3s_server_manifests_urls | length > 0) + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + kind: "{{ item.kind }}" + name: "{{ item.name }}" + namespace: "{{ item.namespace | default('') }}" + wait: true + wait_sleep: 10 + wait_timeout: 360 + loop: + - { name: cilium, kind: HelmChart, namespace: kube-system } + + - name: Cilium + when: k3s_primary_control_node + ansible.builtin.include_tasks: tasks/cilium.yaml + + - name: Cruft + when: k3s_primary_control_node + ansible.builtin.include_tasks: tasks/cruft.yaml diff --git a/bootstrap/templates/ansible/playbooks/cluster-kube-vip.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-kube-vip.yaml.j2 new file mode 100644 index 00000000..db8ce71c --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/cluster-kube-vip.yaml.j2 @@ -0,0 +1,24 @@ +--- +- name: Cluster kube-vip + hosts: controllers + serial: 1 + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... 
+ ansible.builtin.pause: + seconds: 5 + tasks: + - name: Ensure Kubernetes is running + ansible.builtin.include_role: + name: xanmanning.k3s + public: true + vars: + k3s_state: started + + - name: Upgrade kube-vip + ansible.builtin.template: + src: templates/kube-vip-ds.yaml + dest: "{{ k3s_server_manifests_dir }}/kube-vip-ds.yaml" + mode: preserve diff --git a/bootstrap/templates/ansible/playbooks/cluster-nuke.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-nuke.yaml.j2 new file mode 100644 index 00000000..a99265b8 --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/cluster-nuke.yaml.j2 @@ -0,0 +1,101 @@ +--- +- name: Cluster Nuke + hosts: kubernetes + become: true + gather_facts: true + any_errors_fatal: true + vars_prompt: + - name: nuke + prompt: |- + Are you sure you want to nuke this cluster? + Type 'YES I WANT TO DESTROY THIS CLUSTER' to proceed + default: "n" + private: false + pre_tasks: + - name: Check for confirmation + ansible.builtin.fail: + msg: Aborted nuking the cluster + when: nuke != 'YES I WANT TO DESTROY THIS CLUSTER' + + - name: Pausing for 5 seconds... 
+ ansible.builtin.pause: + seconds: 5 + tasks: + - name: Stop Kubernetes # noqa: ignore-errors + ignore_errors: true + block: + - name: Stop Kubernetes + ansible.builtin.include_role: + name: xanmanning.k3s + public: true + vars: + k3s_state: stopped + + # https://github.com/k3s-io/docs/blob/main/docs/installation/network-options.md + - name: Networking + block: + - name: Networking | Delete Cilium links + ansible.builtin.command: + cmd: "ip link delete {{ item }}" + removes: "/sys/class/net/{{ item }}" + loop: ["cilium_host", "cilium_net", "cilium_vxlan"] + - name: Networking | Flush iptables + ansible.builtin.iptables: + table: "{{ item }}" + flush: true + loop: ["filter", "nat", "mangle", "raw"] + - name: Networking | Flush ip6tables + ansible.builtin.iptables: + table: "{{ item }}" + flush: true + ip_version: ipv6 + loop: ["filter", "nat", "mangle", "raw"] + - name: Networking | Delete CNI directory + ansible.builtin.file: + path: /etc/cni/net.d + state: absent + + - name: Check to see if k3s-killall.sh exits + ansible.builtin.stat: + path: /usr/local/bin/k3s-killall.sh + register: check_k3s_killall_script + + - name: Check to see if k3s-uninstall.sh exits + ansible.builtin.stat: + path: /usr/local/bin/k3s-uninstall.sh + register: check_k3s_uninstall_script + + - name: Run k3s-killall.sh + when: check_k3s_killall_script.stat.exists + ansible.builtin.command: + cmd: /usr/local/bin/k3s-killall.sh + register: k3s_killall + changed_when: k3s_killall.rc == 0 + + - name: Run k3s-uninstall.sh + when: check_k3s_uninstall_script.stat.exists + ansible.builtin.command: + cmd: /usr/local/bin/k3s-uninstall.sh + args: + removes: /usr/local/bin/k3s-uninstall.sh + register: k3s_uninstall + changed_when: k3s_uninstall.rc == 0 + + - name: Ensure hard links are removed + when: + - k3s_install_hard_links + - not ansible_check_mode + ansible.builtin.file: + path: "{{ k3s_install_dir }}/{{ item }}" + state: absent + loop: ["kubectl", "crictl", "ctr"] + + - name: Remove local storage 
path + ansible.builtin.file: + path: /var/openebs/local + state: absent + + - name: Reboot + ansible.builtin.reboot: + msg: Rebooting hosts + reboot_timeout: 3600 diff --git a/bootstrap/templates/ansible/playbooks/cluster-prepare.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-prepare.yaml.j2 new file mode 100644 index 00000000..364418bc --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/cluster-prepare.yaml.j2 @@ -0,0 +1,113 @@ +--- +- name: Prepare System + hosts: kubernetes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... + ansible.builtin.pause: + seconds: 5 + - name: Populate service facts + ansible.builtin.service_facts: + tasks: + - name: Locale + block: + - name: Locale | Set timezone + community.general.timezone: + name: "#{ bootstrap_timezone }#" + + - name: Packages + block: + - name: Packages | Install + ansible.builtin.apt: + name: apt-transport-https,ca-certificates,conntrack,curl,dirmngr,gdisk,gnupg,hdparm,htop, + iptables,iputils-ping,ipvsadm,libseccomp2,lm-sensors,net-tools,nfs-common, + nvme-cli,open-iscsi,parted,psmisc,python3,python3-apt,python3-kubernetes,python3-yaml, + smartmontools,socat,software-properties-common,unzip,util-linux + install_recommends: false + + - name: Network Configuration + notify: Reboot + block: + - name: Network Configuration | Set hostname + ansible.builtin.hostname: + name: "{{ inventory_hostname }}" + - name: Network Configuration | Update hosts + ansible.builtin.copy: + content: | + 127.0.0.1 localhost + 127.0.1.1 {{ inventory_hostname }} + + # The following lines are desirable for IPv6 capable hosts + ::1 localhost ip6-localhost ip6-loopback + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + dest: /etc/hosts + mode: preserve + # https://github.com/onedr0p/cluster-template/discussions/635 + - name: Network Configuration | Remove immutable flag from /etc/resolv.conf + ansible.builtin.file: + attributes: -i + path: /etc/resolv.conf + - name: 
Network Configuration | Remove /etc/resolv.conf + ansible.builtin.file: + attributes: -i + path: /etc/resolv.conf + state: absent + - name: Network Configuration | Add custom /etc/resolv.conf + ansible.builtin.copy: + attributes: +i + mode: '0644' + dest: /etc/resolv.conf + content: | + search #{ bootstrap_search_domain|default('.', true) }# + #% for item in bootstrap_dns_servers | default(['1.1.1.1', '1.0.0.1']) %# + nameserver #{ item }# + #% endfor %# + + - name: System Configuration + notify: Reboot + block: + - name: System Configuration | Disable apparmor + when: ansible_facts.services['apparmor.service'] is defined + ansible.builtin.systemd: + name: apparmor + state: stopped + masked: true + - name: System Configuration | Disable swap + ansible.posix.mount: + name: "{{ item }}" + fstype: swap + state: absent + loop: ["none", "swap"] + - name: System Configuration | Create Kernel modules + ansible.builtin.copy: + dest: "/etc/modules-load.d/{{ item }}.conf" + mode: "0644" + content: "{{ item }}" + loop: ["br_netfilter", "ceph", "ip_vs", "ip_vs_rr", "nbd", "overlay", "rbd"] + register: modules_status + - name: System Configuration | Reload Kernel modules # noqa: no-changed-when no-handler + when: modules_status.changed + ansible.builtin.systemd: + name: systemd-modules-load + state: restarted + - name: System Configuration | Sysctl + ansible.posix.sysctl: + name: "{{ item.key }}" + value: "{{ item.value }}" + sysctl_file: /etc/sysctl.d/99-kubernetes.conf + reload: true + with_dict: "{{ sysctl_config }}" + vars: + sysctl_config: + fs.inotify.max_queued_events: 65536 + fs.inotify.max_user_watches: 524288 + fs.inotify.max_user_instances: 8192 + + handlers: + - name: Reboot + ansible.builtin.reboot: + msg: Rebooting hosts + reboot_timeout: 3600 diff --git a/bootstrap/templates/ansible/playbooks/cluster-reboot.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-reboot.yaml.j2 new file mode 100644 index 00000000..6fe1fd0d --- /dev/null +++ 
b/bootstrap/templates/ansible/playbooks/cluster-reboot.yaml.j2 @@ -0,0 +1,15 @@ +--- +- name: Reboot + hosts: kubernetes + become: true + gather_facts: true + any_errors_fatal: true + pre_tasks: + - name: Pausing for 5 seconds... + ansible.builtin.pause: + seconds: 5 + tasks: + - name: Reboot + ansible.builtin.reboot: + msg: Rebooting hosts + reboot_timeout: 3600 diff --git a/bootstrap/templates/ansible/playbooks/cluster-rollout-update.yaml.j2 b/bootstrap/templates/ansible/playbooks/cluster-rollout-update.yaml.j2 new file mode 100644 index 00000000..acad8fd6 --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/cluster-rollout-update.yaml.j2 @@ -0,0 +1,70 @@ +--- +- name: Cluster rollout update + hosts: kubernetes + become: true + gather_facts: true + any_errors_fatal: true + serial: 1 + pre_tasks: + - name: Pausing for 5 seconds... + ansible.builtin.pause: + seconds: 5 + tasks: + - name: Details + ansible.builtin.command: "k3s kubectl get node {{ inventory_hostname }} -o json" + register: kubectl_get_node + delegate_to: "{{ groups['controllers'][0] }}" + failed_when: false + changed_when: false + + - name: Update + when: + # When status.conditions[x].type == Ready then check stats.conditions[x].status for True|False + - kubectl_get_node['stdout'] | from_json | json_query("status.conditions[?type == 'Ready'].status") + # If spec.unschedulable is defined then the node is cordoned + - not (kubectl_get_node['stdout'] | from_json).spec.unschedulable is defined + block: + - name: Cordon + kubernetes.core.k8s_drain: + name: "{{ inventory_hostname }}" + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: cordon + delegate_to: "{{ groups['controllers'][0] }}" + + - name: Drain + kubernetes.core.k8s_drain: + name: "{{ inventory_hostname }}" + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: drain + delete_options: + delete_emptydir_data: true + ignore_daemonsets: true + terminate_grace_period: 600 + wait_timeout: 900 + pod_selectors: + - app!=rook-ceph-osd # Rook Ceph + 
delegate_to: "{{ groups['controllers'][0] }}" + + - name: Update + ansible.builtin.apt: + upgrade: dist + update_cache: true + + - name: Check if reboot is required + ansible.builtin.stat: + path: /var/run/reboot-required + register: reboot_required + + - name: Reboot + when: reboot_required.stat.exists + ansible.builtin.reboot: + msg: Rebooting node + post_reboot_delay: 60 + reboot_timeout: 3600 + + - name: Uncordon + kubernetes.core.k8s_drain: + name: "{{ inventory_hostname }}" + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: uncordon + delegate_to: "{{ groups['controllers'][0] }}" diff --git a/bootstrap/templates/ansible/playbooks/tasks/cilium.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/cilium.yaml.j2 new file mode 100644 index 00000000..ca242bb0 --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/tasks/cilium.yaml.j2 @@ -0,0 +1,56 @@ +--- +- name: Cilium + block: + - name: Cilium | Check if Cilium HelmChart exists + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: cilium + kind: HelmChart + namespace: kube-system + register: cilium_helmchart + + - name: Cilium | Wait for Cilium to rollout + when: cilium_helmchart.resources | count > 0 + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: helm-install-cilium + kind: Job + namespace: kube-system + wait: true + wait_condition: + type: Complete + status: true + wait_timeout: 360 + + - name: Cilium | Patch the Cilium HelmChart to unmanage it + when: cilium_helmchart.resources | count > 0 + kubernetes.core.k8s_json_patch: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: cilium + kind: HelmChart + namespace: kube-system + patch: + - op: add + path: /metadata/annotations/helmcharts.helm.cattle.io~1unmanaged + value: "true" + + - name: Cilium | Delete the Cilium HelmChart CR + when: cilium_helmchart.resources | count > 0 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: cilium + kind: HelmChart + namespace: kube-system + state: absent + + 
- name: Cilium | Force delete the Cilium HelmChart + when: cilium_helmchart.resources | count > 0 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: cilium + kind: HelmChart + namespace: kube-system + state: patched + definition: + metadata: + finalizers: [] diff --git a/bootstrap/templates/ansible/playbooks/tasks/cruft.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/cruft.yaml.j2 new file mode 100644 index 00000000..73697476 --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/tasks/cruft.yaml.j2 @@ -0,0 +1,31 @@ +--- +- name: Cruft + block: + - name: Cruft | Get list of custom manifests + ansible.builtin.find: + paths: "{{ k3s_server_manifests_dir }}" + file_type: file + use_regex: true + patterns: ["^custom-.*"] + register: custom_manifest + + - name: Cruft | Delete custom manifests + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ custom_manifest.files }}" + + - name: Cruft | Get list of custom addons + kubernetes.core.k8s_info: + kubeconfig: /etc/rancher/k3s/k3s.yaml + kind: Addon + register: addons_list + + - name: Cruft | Delete addons + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + name: "{{ item.metadata.name }}" + kind: Addon + namespace: kube-system + state: absent + loop: "{{ addons_list.resources | selectattr('metadata.name', 'match', '^custom-.*') | list }}" diff --git a/bootstrap/templates/ansible/playbooks/tasks/kubeconfig.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/kubeconfig.yaml.j2 new file mode 100644 index 00000000..56bf684e --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/tasks/kubeconfig.yaml.j2 @@ -0,0 +1,26 @@ +--- +- name: Get absolute path to this Git repository # noqa: command-instead-of-module + ansible.builtin.command: git rev-parse --show-toplevel + delegate_to: localhost + become: false + run_once: true + register: repository_path + changed_when: false + check_mode: false + failed_when: repository_path.rc != 0 + +- name: Copy kubeconfig to 
the project directory + when: k3s_primary_control_node + ansible.builtin.fetch: + src: /etc/rancher/k3s/k3s.yaml + dest: "{{ repository_path.stdout }}/kubeconfig" + flat: true + +- name: Update kubeconfig with the correct load balancer address + delegate_to: localhost + become: false + run_once: true + ansible.builtin.replace: + path: "{{ repository_path.stdout }}/kubeconfig" + regexp: https://127.0.0.1:6443 + replace: "https://{{ k3s_registration_address }}:6443" diff --git a/bootstrap/templates/ansible/playbooks/tasks/version-check.yaml.j2 b/bootstrap/templates/ansible/playbooks/tasks/version-check.yaml.j2 new file mode 100644 index 00000000..56e56702 --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/tasks/version-check.yaml.j2 @@ -0,0 +1,17 @@ +--- +- name: Version Check + block: + - name: Get deployed k3s version + ansible.builtin.command: k3s --version + register: k3s_version + changed_when: false + failed_when: false + + - name: Extract k3s version + ansible.builtin.set_fact: + current_k3s_version: "{{ k3s_version.stdout | regex_replace('(?im)k3s version (?P[a-z0-9\\.\\+]+).*\n.*', '\\g') }}" + + - name: Check if upgrades are allowed + ansible.builtin.assert: + that: ["k3s_release_version is version(current_k3s_version, '>=')"] + fail_msg: "Unable to upgrade k3s because the deployed version is higher than the one specified in the configuration" diff --git a/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 b/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 new file mode 100644 index 00000000..12aa1010 --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/templates/custom-cilium-helmchart.yaml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: cilium + namespace: kube-system +spec: + repo: https://helm.cilium.io/ + chart: cilium + # renovate: datasource=helm depName=cilium repository=https://helm.cilium.io + version: 1.15.1 + targetNamespace: 
kube-system + bootstrap: true + valuesContent: |- + #% filter indent(width=4, first=True) %# + #% include 'partials/cilium-values-init.partial.yaml.j2' %# + #% endfilter %# diff --git a/bootstrap/templates/ansible/playbooks/templates/kube-vip-ds.yaml.j2 b/bootstrap/templates/ansible/playbooks/templates/kube-vip-ds.yaml.j2 new file mode 100644 index 00000000..f62cab4d --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/templates/kube-vip-ds.yaml.j2 @@ -0,0 +1,2 @@ +--- +#% include 'partials/kube-vip-ds.partial.yaml.j2' %# diff --git a/bootstrap/templates/ansible/playbooks/templates/kube-vip-rbac.yaml.j2 b/bootstrap/templates/ansible/playbooks/templates/kube-vip-rbac.yaml.j2 new file mode 100644 index 00000000..481c2e82 --- /dev/null +++ b/bootstrap/templates/ansible/playbooks/templates/kube-vip-rbac.yaml.j2 @@ -0,0 +1,2 @@ +--- +#% include 'partials/kube-vip-rbac.partial.yaml.j2' %# diff --git a/bootstrap/templates/ansible/requirements.txt.j2 b/bootstrap/templates/ansible/requirements.txt.j2 new file mode 100644 index 00000000..e7d9e4dc --- /dev/null +++ b/bootstrap/templates/ansible/requirements.txt.j2 @@ -0,0 +1,4 @@ +ansible-lint==24.2.0 +ansible==9.2.0 +jmespath==1.0.1 +openshift==0.13.2 diff --git a/bootstrap/templates/ansible/requirements.yaml.j2 b/bootstrap/templates/ansible/requirements.yaml.j2 new file mode 100644 index 00000000..4fb53353 --- /dev/null +++ b/bootstrap/templates/ansible/requirements.yaml.j2 @@ -0,0 +1,14 @@ +--- +collections: + - name: ansible.posix + version: 1.5.4 + - name: ansible.utils + version: 3.1.0 + - name: community.general + version: 8.3.0 + - name: kubernetes.core + version: 3.0.0 +roles: + - name: xanmanning.k3s + src: https://github.com/PyratLabs/ansible-role-k3s + version: v3.4.4 diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..34b1a211 --- 
/dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml.j2 @@ -0,0 +1,37 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: cert-manager +spec: + interval: 30m + chart: + spec: + chart: cert-manager + version: v1.14.3 + sourceRef: + kind: HelmRepository + name: jetstack + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + installCRDs: true + dns01RecursiveNameservers: 1.1.1.1:53,9.9.9.9:53 + dns01RecursiveNameserversOnly: true + podDnsPolicy: None + podDnsConfig: + nameservers: + - "1.1.1.1" + - "9.9.9.9" + prometheus: + enabled: true + servicemonitor: + enabled: true diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml.j2 new file mode 100644 index 00000000..5dd7baca --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/.mjfilter.py b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/.mjfilter.py new file mode 100644 index 00000000..d9ae82b4 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_cloudflare", {}).get("enabled", False) == True diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml.j2 new file mode 100644 index 00000000..1cf7148a --- /dev/null +++ 
b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/issuers.yaml.j2 @@ -0,0 +1,39 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-production +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + email: "${SECRET_ACME_EMAIL}" + privateKeySecretRef: + name: letsencrypt-production + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cert-manager-secret + key: api-token + selector: + dnsZones: + - "${SECRET_DOMAIN}" +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-staging +spec: + acme: + server: https://acme-staging-v02.api.letsencrypt.org/directory + email: "${SECRET_ACME_EMAIL}" + privateKeySecretRef: + name: letsencrypt-staging + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cert-manager-secret + key: api-token + selector: + dnsZones: + - "${SECRET_DOMAIN}" diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml.j2 new file mode 100644 index 00000000..17754be6 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./secret.sops.yaml + - ./issuers.yaml diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml.j2 new file mode 100644 index 00000000..f5bf887f --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/issuers/secret.sops.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cert-manager-secret +stringData: + api-token: "#{ bootstrap_cloudflare.token }#" diff --git 
a/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/ks.yaml.j2 new file mode 100644 index 00000000..3efe99d8 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/cert-manager/ks.yaml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cert-manager + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/cert-manager/cert-manager/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +#% if bootstrap_cloudflare.enabled %# +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cert-manager-issuers + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cert-manager + path: ./kubernetes/apps/cert-manager/cert-manager/issuers + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +#% endif %# diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/kustomization.yaml.j2 new file mode 100644 index 00000000..a0a3e5ed --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./cert-manager/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/cert-manager/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/cert-manager/namespace.yaml.j2 new file mode 100644 index 00000000..ed788350 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/cert-manager/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- 
+apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/flux-system/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/kustomization.yaml.j2 new file mode 100644 index 00000000..10587f8c --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./webhooks/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/flux-system/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/namespace.yaml.j2 new file mode 100644 index 00000000..b48db452 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: flux-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml.j2 new file mode 100644 index 00000000..e704eed3 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/ingress.yaml.j2 @@ -0,0 +1,25 @@ +#% if bootstrap_cloudflare.enabled %# +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: flux-webhook + annotations: + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" +spec: + ingressClassName: external + rules: + - host: &host "flux-webhook.${SECRET_DOMAIN}" + http: + paths: + - path: /hook/ + pathType: Prefix + backend: + service: + name: webhook-receiver + port: + number: 80 + tls: + - hosts: + - *host +#% endif %# diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml.j2 new file mode 
100644 index 00000000..75fc5841 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/kustomization.yaml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./secret.sops.yaml + #% if bootstrap_cloudflare.enabled %# + - ./ingress.yaml + #% endif %# + - ./receiver.yaml diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml.j2 new file mode 100644 index 00000000..cca5931b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/receiver.yaml.j2 @@ -0,0 +1,25 @@ +--- +apiVersion: notification.toolkit.fluxcd.io/v1 +kind: Receiver +metadata: + name: github-receiver +spec: + type: github + events: + - ping + - push + secretRef: + name: github-webhook-token-secret + resources: + - apiVersion: source.toolkit.fluxcd.io/v1 + kind: GitRepository + name: home-kubernetes + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: cluster + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: cluster-apps + namespace: flux-system diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml.j2 new file mode 100644 index 00000000..34ac7daf --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/github/secret.sops.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: github-webhook-token-secret +stringData: + token: "#{ bootstrap_github_webhook_token }#" diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/kustomization.yaml.j2 new file mode 100644 index 00000000..ccd8b3eb --- 
/dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./github diff --git a/bootstrap/templates/kubernetes/apps/flux-system/webhooks/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/ks.yaml.j2 new file mode 100644 index 00000000..e80c50b2 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/flux-system/webhooks/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app flux-webhooks + namespace: flux-system +spec: + targetNamespace: flux-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/flux-system/webhooks/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-bgp.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-bgp.yaml.j2 new file mode 100644 index 00000000..7e15b6f6 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-bgp.yaml.j2 @@ -0,0 +1,37 @@ +--- +# https://docs.cilium.io/en/latest/network/bgp-control-plane/ +apiVersion: cilium.io/v2alpha1 +kind: CiliumBGPPeeringPolicy +metadata: + name: policy +spec: + nodeSelector: + matchLabels: + kubernetes.io/os: linux + virtualRouters: + - localASN: #{ bootstrap_bgp.local_asn }# + neighbors: + #% if bootstrap_bgp.peers %# + #% for item in bootstrap_bgp.peers %# + - peerAddress: "#{ item }#/32" + peerASN: #{ bootstrap_bgp.peer_asn }# + #% endfor %# + #% else %# + #% if bootstrap_node_default_gateway %# + - peerAddress: "#{ bootstrap_node_default_gateway }#/32" + #% else %# + - peerAddress: "#{ bootstrap_node_network | nthhost(1) }#/32" + #% endif %# + peerASN: #{ bootstrap_bgp.peer_asn }# + #% endif %# + serviceSelector: + 
matchExpressions: + - {key: somekey, operator: NotIn, values: ['never-used-value']} +--- +apiVersion: cilium.io/v2alpha1 +kind: CiliumLoadBalancerIPPool +metadata: + name: pool +spec: + cidrs: + - cidr: "${BGP_ADVERTISED_CIDR}" diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml.j2 new file mode 100644 index 00000000..caa35cab --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/cilium-l2.yaml.j2 @@ -0,0 +1,22 @@ +--- +# https://docs.cilium.io/en/latest/network/l2-announcements +apiVersion: cilium.io/v2alpha1 +kind: CiliumL2AnnouncementPolicy +metadata: + name: policy +spec: + loadBalancerIPs: true + # NOTE: This might need to be set if you have more than one active NIC on your hosts + # interfaces: + # - ^eno[0-9]+ + nodeSelector: + matchLabels: + kubernetes.io/os: linux +--- +apiVersion: cilium.io/v2alpha1 +kind: CiliumLoadBalancerIPPool +metadata: + name: pool +spec: + cidrs: + - cidr: "${NODE_CIDR}" diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..2acdaf57 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/helmrelease.yaml.j2 @@ -0,0 +1,28 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: cilium +spec: + interval: 30m + chart: + spec: + chart: cilium + version: 1.15.1 + sourceRef: + kind: HelmRepository + name: cilium + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + #% filter indent(width=4, first=True) %# + #% include 'partials/cilium-values-full.partial.yaml.j2' %# + #% endfilter %# diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 
b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 new file mode 100644 index 00000000..7da0fa0d --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/app/kustomization.yaml.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + #% if bootstrap_bgp.enabled %# + - ./cilium-bgp.yaml + #% endif %# + #% if ((not bootstrap_bgp.enabled) and (not bootstrap_feature_gates.dual_stack_ipv4_first)) %# + - ./cilium-l2.yaml + #% endif %# + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/cilium/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/cilium/ks.yaml.j2 new file mode 100644 index 00000000..34fbd9a9 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/cilium/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cilium + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/cilium/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py new file mode 100644 index 00000000..3ace63df --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..de34fa4c --- /dev/null +++ 
b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml.j2 @@ -0,0 +1,32 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: kubelet-csr-approver +spec: + interval: 30m + chart: + spec: + chart: kubelet-csr-approver + version: 1.0.7 + sourceRef: + kind: HelmRepository + name: postfinance + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + #% filter indent(width=4, first=True) %# + #% include 'partials/kubelet-csr-approver-values.partial.yaml.j2' %# + #% endfilter %# + metrics: + enable: true + serviceMonitor: + enabled: true diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 new file mode 100644 index 00000000..5dd7baca --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml.j2 new file mode 100644 index 00000000..adfb4940 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kubelet-csr-approver/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kubelet-csr-approver + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/kubelet-csr-approver/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m 
+ timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 new file mode 100644 index 00000000..f1547936 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/kustomization.yaml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./cilium/ks.yaml + #% if bootstrap_distribution in ["talos"] %# + - ./kubelet-csr-approver/ks.yaml + #% endif %# + - ./metrics-server/ks.yaml + #% if bootstrap_distribution in ["talos"] %# + - ./spegel/ks.yaml + #% endif %# + - ./reloader/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..1c435f4e --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/helmrelease.yaml.j2 @@ -0,0 +1,34 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: metrics-server +spec: + interval: 30m + chart: + spec: + chart: metrics-server + version: 3.12.0 + sourceRef: + kind: HelmRepository + name: metrics-server + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + args: + - --kubelet-insecure-tls + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + metrics: + enabled: true + serviceMonitor: + enabled: true diff --git a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml.j2 new file mode 100644 index 00000000..5dd7baca --- /dev/null +++ 
b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/ks.yaml.j2 new file mode 100644 index 00000000..244f53c1 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/metrics-server/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app metrics-server + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/metrics-server/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/namespace.yaml.j2 new file mode 100644 index 00000000..5eeb2c91 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..8f636562 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/helmrelease.yaml.j2 @@ -0,0 +1,31 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: reloader +spec: + interval: 30m + chart: + spec: + chart: reloader + version: 1.0.69 + sourceRef: + kind: HelmRepository + name: stakater + namespace: flux-system + install: + remediation: + 
retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + fullnameOverride: reloader + reloader: + readOnlyRootFileSystem: true + podMonitor: + enabled: true + namespace: "{{ .Release.Namespace }}" diff --git a/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/kustomization.yaml.j2 new file mode 100644 index 00000000..5dd7baca --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/reloader/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/reloader/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/reloader/ks.yaml.j2 new file mode 100644 index 00000000..9aa42993 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/reloader/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app reloader + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/reloader/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py b/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py new file mode 100644 index 00000000..3ace63df --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 new file mode 100644 index 
00000000..50c00d47 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/helmrelease.yaml.j2 @@ -0,0 +1,33 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: spegel +spec: + interval: 30m + chart: + spec: + chart: spegel + version: v0.0.18 + sourceRef: + kind: HelmRepository + name: xenitab + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + spegel: + containerdSock: /run/containerd/containerd.sock + containerdRegistryConfigPath: /etc/cri/conf.d/hosts + service: + registry: + hostPort: 29999 + serviceMonitor: + enabled: true diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 new file mode 100644 index 00000000..5dd7baca --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/kube-system/spegel/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/kube-system/spegel/ks.yaml.j2 new file mode 100644 index 00000000..83c730b0 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/kube-system/spegel/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app spegel + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/kube-system/spegel/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/.mjfilter.py b/bootstrap/templates/kubernetes/apps/network/.mjfilter.py new 
file mode 100644 index 00000000..d9ae82b4 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_cloudflare", {}).get("enabled", False) == True diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/configs/config.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/configs/config.yaml.j2 new file mode 100644 index 00000000..05bcef5c --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/configs/config.yaml.j2 @@ -0,0 +1,10 @@ +--- +originRequest: + originServerName: "external.${SECRET_DOMAIN}" + +ingress: + - hostname: "${SECRET_DOMAIN}" + service: https://ingress-nginx-external-controller.network.svc.cluster.local:443 + - hostname: "*.${SECRET_DOMAIN}" + service: https://ingress-nginx-external-controller.network.svc.cluster.local:443 + - service: http_status:404 diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml.j2 new file mode 100644 index 00000000..43d7d7b2 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/dnsendpoint.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: externaldns.k8s.io/v1alpha1 +kind: DNSEndpoint +metadata: + name: cloudflared +spec: + endpoints: + - dnsName: "external.${SECRET_DOMAIN}" + recordType: CNAME + targets: ["${SECRET_CLOUDFLARE_TUNNEL_ID}.cfargotunnel.com"] diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..0a515b4c --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/helmrelease.yaml.j2 @@ -0,0 +1,109 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: cloudflared +spec: + interval: 30m + chart: + spec: + chart: app-template + 
version: 2.6.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + controllers: + main: + replicas: 2 + strategy: RollingUpdate + annotations: + reloader.stakater.com/auto: "true" + containers: + main: + image: + repository: docker.io/cloudflare/cloudflared + tag: 2024.2.1 + env: + NO_AUTOUPDATE: true + TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json + TUNNEL_METRICS: 0.0.0.0:8080 + TUNNEL_ORIGIN_ENABLE_HTTP2: true + TUNNEL_TRANSPORT_PROTOCOL: quic + TUNNEL_POST_QUANTUM: true + TUNNEL_ID: + valueFrom: + secretKeyRef: + name: cloudflared-secret + key: TUNNEL_ID + args: + - tunnel + - --config + - /etc/cloudflared/config/config.yaml + - run + - "$(TUNNEL_ID)" + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ready + port: &port 8080 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + enabled: false + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + seccompProfile: + type: RuntimeDefault + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + pod: + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + runAsNonRoot: true + service: + main: + ports: + http: + port: *port + serviceMonitor: + main: + enabled: true + persistence: + config: + enabled: true + type: configMap + name: cloudflared-configmap + globalMounts: + - path: /etc/cloudflared/config/config.yaml + subPath: config.yaml + readOnly: true + creds: + type: secret + name: cloudflared-secret + globalMounts: + - path: /etc/cloudflared/creds/credentials.json + subPath: credentials.json + readOnly: true diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/kustomization.yaml.j2 
b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/kustomization.yaml.j2 new file mode 100644 index 00000000..891a864a --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/kustomization.yaml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./dnsendpoint.yaml + - ./secret.sops.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: cloudflared-configmap + files: + - ./configs/config.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/app/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/secret.sops.yaml.j2 new file mode 100644 index 00000000..67d169ed --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/app/secret.sops.yaml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloudflared-secret +stringData: + TUNNEL_ID: "#{ bootstrap_cloudflare.tunnel.id }#" + credentials.json: | + { + "AccountTag": "#{ bootstrap_cloudflare.tunnel.account_id }#", + "TunnelSecret": "#{ bootstrap_cloudflare.tunnel.secret }#", + "TunnelID": "#{ bootstrap_cloudflare.tunnel.id }#" + } diff --git a/bootstrap/templates/kubernetes/apps/network/cloudflared/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/cloudflared/ks.yaml.j2 new file mode 100644 index 00000000..eb8d8da0 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/cloudflared/ks.yaml.j2 @@ -0,0 +1,22 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cloudflared + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-dns + path: ./kubernetes/apps/network/cloudflared/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git 
a/bootstrap/templates/kubernetes/apps/network/echo-server/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/echo-server/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..95bb9b6b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/echo-server/app/helmrelease.yaml.j2 @@ -0,0 +1,106 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: echo-server +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 2.6.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + controllers: + main: + strategy: RollingUpdate + containers: + main: + image: + repository: ghcr.io/mendhak/http-https-echo + tag: 31 + env: + HTTP_PORT: &port 8080 + LOG_WITHOUT_NEWLINE: true + LOG_IGNORE_PATH: /healthz + PROMETHEUS_ENABLED: true + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /healthz + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + enabled: false + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + seccompProfile: + type: RuntimeDefault + resources: + requests: + cpu: 10m + limits: + memory: 64Mi + pod: + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + runAsNonRoot: true + service: + main: + ports: + http: + port: *port + serviceMonitor: + main: + enabled: true + ingress: + main: + enabled: true + className: external + annotations: + external-dns.alpha.kubernetes.io/target: "external.${SECRET_DOMAIN}" + hosts: + - host: &host "{{ .Release.Name }}-external.${SECRET_DOMAIN}" + paths: + - path: / + service: + name: main + port: http + tls: + - hosts: + - *host + internal: + enabled: true + className: internal + hosts: + - host: &host 
"{{ .Release.Name }}-internal.${SECRET_DOMAIN}" + paths: + - path: / + service: + name: main + port: http + tls: + - hosts: + - *host diff --git a/bootstrap/templates/kubernetes/apps/network/echo-server/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/echo-server/app/kustomization.yaml.j2 new file mode 100644 index 00000000..5dd7baca --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/echo-server/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/echo-server/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/echo-server/ks.yaml.j2 new file mode 100644 index 00000000..2984f219 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/echo-server/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app echo-server + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/echo-server/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/external-dns/app/dnsendpoint-crd.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/dnsendpoint-crd.yaml.j2 new file mode 100644 index 00000000..9254f89d --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/dnsendpoint-crd.yaml.j2 @@ -0,0 +1,93 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + api-approved.kubernetes.io: "https://github.com/kubernetes-sigs/external-dns/pull/2007" + creationTimestamp: null + name: dnsendpoints.externaldns.k8s.io +spec: + group: externaldns.k8s.io + names: + 
kind: DNSEndpoint + listKind: DNSEndpointList + plural: dnsendpoints + singular: dnsendpoint + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DNSEndpointSpec defines the desired state of DNSEndpoint + properties: + endpoints: + items: + description: Endpoint is a high-level way of a connection between a service and an IP + properties: + dnsName: + description: The hostname of the DNS record + type: string + labels: + additionalProperties: + type: string + description: Labels stores labels defined for the Endpoint + type: object + providerSpecific: + description: ProviderSpecific stores provider specific config + items: + description: ProviderSpecificProperty holds the name and value of a configuration which is specific to individual DNS providers + properties: + name: + type: string + value: + type: string + type: object + type: array + recordTTL: + description: TTL for the record + format: int64 + type: integer + recordType: + description: RecordType type of record, e.g. CNAME, A, SRV, TXT etc + type: string + setIdentifier: + description: Identifier to distinguish multiple records with the same name and type (e.g. 
Route53 records with routing policies other than 'simple') + type: string + targets: + description: The targets the DNS record points to + items: + type: string + type: array + type: object + type: array + type: object + status: + description: DNSEndpointStatus defines the observed state of DNSEndpoint + properties: + observedGeneration: + description: The generation observed by the external-dns controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/bootstrap/templates/kubernetes/apps/network/external-dns/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..76f90410 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/helmrelease.yaml.j2 @@ -0,0 +1,47 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: &app external-dns +spec: + interval: 30m + chart: + spec: + chart: external-dns + version: 1.14.3 + sourceRef: + kind: HelmRepository + name: external-dns + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + fullnameOverride: *app + provider: cloudflare + env: + - name: CF_API_TOKEN + valueFrom: + secretKeyRef: + name: external-dns-secret + key: api-token + extraArgs: + - --ingress-class=external + - --cloudflare-proxied + - --crd-source-apiversion=externaldns.k8s.io/v1alpha1 + - --crd-source-kind=DNSEndpoint + policy: sync + sources: ["crd", "ingress"] + txtPrefix: k8s. 
+ txtOwnerId: default + domainFilters: ["${SECRET_DOMAIN}"] + serviceMonitor: + enabled: true + podAnnotations: + secret.reloader.stakater.com/reload: external-dns-secret diff --git a/bootstrap/templates/kubernetes/apps/network/external-dns/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/kustomization.yaml.j2 new file mode 100644 index 00000000..069449ad --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/kustomization.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./dnsendpoint-crd.yaml + - ./secret.sops.yaml + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/external-dns/app/secret.sops.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/app/secret.sops.yaml.j2 new file mode 100644 index 00000000..c067b329 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/app/secret.sops.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: external-dns-secret +stringData: + api-token: "#{ bootstrap_cloudflare.token }#" diff --git a/bootstrap/templates/kubernetes/apps/network/external-dns/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/external-dns/ks.yaml.j2 new file mode 100644 index 00000000..eaed4b56 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/external-dns/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-dns + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/external-dns/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml.j2 
b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml.j2 new file mode 100644 index 00000000..94d1afbf --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/kustomization.yaml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./staging.yaml + #% if bootstrap_cloudflare.acme.production %# + - ./production.yaml + #% endif %# diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/production.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/production.yaml.j2 new file mode 100644 index 00000000..b5afdf41 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/production.yaml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "${SECRET_DOMAIN/./-}-production" +spec: + secretName: "${SECRET_DOMAIN/./-}-production-tls" + issuerRef: + name: letsencrypt-production + kind: ClusterIssuer + commonName: "${SECRET_DOMAIN}" + dnsNames: + - "${SECRET_DOMAIN}" + - "*.${SECRET_DOMAIN}" diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml.j2 new file mode 100644 index 00000000..9c869425 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/certificates/staging.yaml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "${SECRET_DOMAIN/./-}-staging" +spec: + secretName: "${SECRET_DOMAIN/./-}-staging-tls" + issuerRef: + name: letsencrypt-staging + kind: ClusterIssuer + commonName: "${SECRET_DOMAIN}" + dnsNames: + - "${SECRET_DOMAIN}" + - "*.${SECRET_DOMAIN}" diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml.j2 
b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml.j2 new file mode 100644 index 00000000..7fbdb805 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/helmrelease.yaml.j2 @@ -0,0 +1,93 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: ingress-nginx-external +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 4.9.1 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + dependsOn: + - name: cloudflared + namespace: network + values: + fullnameOverride: ingress-nginx-external + controller: + replicaCount: 1 + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: "external.${SECRET_DOMAIN}" + io.cilium/lb-ipam-ips: "#{ bootstrap_cloudflare.tunnel.ingress_vip }#" + externalTrafficPolicy: Cluster + ingressClassResource: + name: external + default: false + controllerValue: k8s.io/external + admissionWebhooks: + objectSelector: + matchExpressions: + - key: ingress-class + operator: In + values: ["external"] + config: + client-body-buffer-size: 100M + client-body-timeout: 120 + client-header-timeout: 120 + enable-brotli: "true" + enable-real-ip: "true" + hsts-max-age: 31449600 + keep-alive-requests: 10000 + keep-alive: 120 + log-format-escape-json: "true" + log-format-upstream: > + {"time": "$time_iso8601", "remote_addr": "$proxy_protocol_addr", "x_forwarded_for": "$proxy_add_x_forwarded_for", + "request_id": "$req_id", "remote_user": "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, + "status": $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args", + "request_length": $request_length, "duration": $request_time, "method": "$request_method", "http_referrer": "$http_referer", + "http_user_agent": 
"$http_user_agent"} + proxy-body-size: 0 + proxy-buffer-size: 16k + ssl-protocols: TLSv1.3 TLSv1.2 + metrics: + enabled: true + serviceMonitor: + enabled: true + namespaceSelector: + any: true + extraArgs: + #% if bootstrap_cloudflare.acme.production %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-production-tls" + #% else %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-staging-tls" + #% endif %# + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx-external + app.kubernetes.io/component: controller + resources: + requests: + cpu: 100m + limits: + memory: 500Mi + defaultBackend: + enabled: false diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml.j2 new file mode 100644 index 00000000..5dd7baca --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/external/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml.j2 new file mode 100644 index 00000000..21d86390 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/helmrelease.yaml.j2 @@ -0,0 +1,90 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: ingress-nginx-internal + namespace: network +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 4.9.1 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: 
true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + fullnameOverride: ingress-nginx-internal + controller: + replicaCount: 1 + service: + annotations: + io.cilium/lb-ipam-ips: "#{ bootstrap_cloudflare.ingress_vip }#" + externalTrafficPolicy: Cluster + ingressClassResource: + name: internal + default: true + controllerValue: k8s.io/internal + admissionWebhooks: + objectSelector: + matchExpressions: + - key: ingress-class + operator: In + values: ["internal"] + config: + client-body-buffer-size: 100M + client-body-timeout: 120 + client-header-timeout: 120 + enable-brotli: "true" + enable-real-ip: "true" + hsts-max-age: 31449600 + keep-alive-requests: 10000 + keep-alive: 120 + log-format-escape-json: "true" + log-format-upstream: > + {"time": "$time_iso8601", "remote_addr": "$proxy_protocol_addr", "x_forwarded_for": "$proxy_add_x_forwarded_for", + "request_id": "$req_id", "remote_user": "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, + "status": $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args", + "request_length": $request_length, "duration": $request_time, "method": "$request_method", "http_referrer": "$http_referer", + "http_user_agent": "$http_user_agent"} + proxy-body-size: 0 + proxy-buffer-size: 16k + ssl-protocols: TLSv1.3 TLSv1.2 + metrics: + enabled: true + serviceMonitor: + enabled: true + namespaceSelector: + any: true + extraArgs: + #% if bootstrap_cloudflare.acme.production %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-production-tls" + #% else %# + default-ssl-certificate: "network/${SECRET_DOMAIN/./-}-staging-tls" + #% endif %# + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx-internal + app.kubernetes.io/component: controller + resources: + requests: + 
cpu: 100m + limits: + memory: 500Mi + defaultBackend: + enabled: false diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml.j2 new file mode 100644 index 00000000..5dd7baca --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/internal/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/ingress-nginx/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/ks.yaml.j2 new file mode 100644 index 00000000..99b1abb5 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/ingress-nginx/ks.yaml.j2 @@ -0,0 +1,66 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app ingress-nginx-certificates + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cert-manager-issuers + path: ./kubernetes/apps/network/ingress-nginx/certificates + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app ingress-nginx-internal + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: ingress-nginx-certificates + path: ./kubernetes/apps/network/ingress-nginx/internal + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app ingress-nginx-external + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: 
+ labels: + app.kubernetes.io/name: *app + dependsOn: + - name: ingress-nginx-certificates + path: ./kubernetes/apps/network/ingress-nginx/external + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..4349ac30 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/helmrelease.yaml.j2 @@ -0,0 +1,34 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: k8s-gateway +spec: + interval: 30m + chart: + spec: + chart: k8s-gateway + version: 2.3.0 + sourceRef: + kind: HelmRepository + name: k8s-gateway + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + fullnameOverride: k8s-gateway + domain: "${SECRET_DOMAIN}" + ttl: 1 + service: + type: LoadBalancer + port: 53 + annotations: + io.cilium/lb-ipam-ips: "#{ bootstrap_cloudflare.gateway_vip }#" + externalTrafficPolicy: Cluster diff --git a/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml.j2 new file mode 100644 index 00000000..5dd7baca --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/k8s-gateway/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/ks.yaml.j2 new file mode 100644 index 00000000..06f44255 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/k8s-gateway/ks.yaml.j2 @@ -0,0 +1,20 @@ 
+--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app k8s-gateway + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/network/k8s-gateway/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/network/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/kustomization.yaml.j2 new file mode 100644 index 00000000..e6f8ddc1 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/kustomization.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./cloudflared/ks.yaml + - ./echo-server/ks.yaml + - ./external-dns/ks.yaml + - ./ingress-nginx/ks.yaml + - ./k8s-gateway/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/network/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/network/namespace.yaml.j2 new file mode 100644 index 00000000..4d78d7b1 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/network/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: network + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/openebs-system/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/openebs-system/kustomization.yaml.j2 new file mode 100644 index 00000000..9cd8d4e4 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/openebs-system/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./openebs/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/openebs-system/namespace.yaml.j2 b/bootstrap/templates/kubernetes/apps/openebs-system/namespace.yaml.j2 new file mode 100644 index 00000000..f173c6c9 --- 
/dev/null +++ b/bootstrap/templates/kubernetes/apps/openebs-system/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: openebs-system + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..3eb8f347 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/helmrelease.yaml.j2 @@ -0,0 +1,31 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: openebs +spec: + interval: 30m + chart: + spec: + chart: openebs + version: 3.10.0 + sourceRef: + kind: HelmRepository + name: openebs + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + localprovisioner: + hostpathClass: + enabled: true + name: openebs-hostpath + isDefaultClass: false + basePath: /var/openebs/local diff --git a/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/kustomization.yaml.j2 new file mode 100644 index 00000000..5dd7baca --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/bootstrap/templates/kubernetes/apps/openebs-system/openebs/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/ks.yaml.j2 new file mode 100644 index 00000000..170feca9 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/openebs-system/openebs/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app openebs + namespace: flux-system +spec: 
+ targetNamespace: openebs-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/openebs-system/openebs/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/.mjfilter.py b/bootstrap/templates/kubernetes/apps/system-upgrade/.mjfilter.py new file mode 100644 index 00000000..0979f9a6 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["k3s"] diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 new file mode 100644 index 00000000..c159f45b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./plan.yaml diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 new file mode 100644 index 00000000..5412ea57 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/app/plan.yaml.j2 @@ -0,0 +1,50 @@ +--- +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: controllers +spec: + version: "${KUBE_VERSION}" + upgrade: + image: rancher/k3s-upgrade + serviceAccountName: system-upgrade + concurrency: 1 + cordon: true + nodeSelector: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + operator: Exists + - key: 
node-role.kubernetes.io/etcd + effect: NoExecute + operator: Exists + - key: CriticalAddonsOnly + operator: Exists +--- +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: workers +spec: + version: "${KUBE_VERSION}" + serviceAccountName: system-upgrade + concurrency: 1 + nodeSelector: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: DoesNotExist + prepare: + image: rancher/k3s-upgrade + args: ["prepare", "server"] + upgrade: + image: rancher/k3s-upgrade diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 new file mode 100644 index 00000000..1423dbbe --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/k3s/ks.yaml.j2 @@ -0,0 +1,26 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app system-upgrade-k3s + namespace: flux-system +spec: + targetNamespace: system-upgrade + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: system-upgrade-controller + path: ./kubernetes/apps/system-upgrade/k3s/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + retryInterval: 1m + timeout: 5m + postBuild: + substitute: + # renovate: datasource=github-releases depName=k3s-io/k3s + KUBE_VERSION: v1.29.1+k3s2 diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 new file mode 100644 index 00000000..e0b2bf29 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/kustomization.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./namespace.yaml + - ./system-upgrade-controller/ks.yaml + - ./k3s/ks.yaml diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/namespace.yaml.j2 
b/bootstrap/templates/kubernetes/apps/system-upgrade/namespace.yaml.j2 new file mode 100644 index 00000000..5ea024dd --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/namespace.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: system-upgrade + labels: + kustomize.toolkit.fluxcd.io/prune: disabled diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 new file mode 100644 index 00000000..dbf59f8b --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml.j2 @@ -0,0 +1,107 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta2 +kind: HelmRelease +metadata: + name: &app system-upgrade-controller +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 2.6.0 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + controllers: + main: + strategy: RollingUpdate + containers: + main: + image: + repository: docker.io/rancher/system-upgrade-controller + tag: v0.13.2 + env: + SYSTEM_UPGRADE_CONTROLLER_DEBUG: false + SYSTEM_UPGRADE_CONTROLLER_THREADS: 2 + SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: 900 + SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: 99 + SYSTEM_UPGRADE_JOB_IMAGE_PULL_POLICY: IfNotPresent + SYSTEM_UPGRADE_JOB_KUBECTL_IMAGE: registry.k8s.io/kubectl:v1.29.2 + SYSTEM_UPGRADE_JOB_PRIVILEGED: true + SYSTEM_UPGRADE_JOB_TTL_SECONDS_AFTER_FINISH: 900 + SYSTEM_UPGRADE_PLAN_POLLING_INTERVAL: 15m + SYSTEM_UPGRADE_CONTROLLER_NAME: *app + SYSTEM_UPGRADE_CONTROLLER_NAMESPACE: + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: 
["ALL"] } + seccompProfile: + type: RuntimeDefault + pod: + securityContext: + runAsUser: 65534 + runAsGroup: 65534 + runAsNonRoot: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccount: + create: true + name: system-upgrade + service: + main: + enabled: false + persistence: + tmp: + type: emptyDir + globalMounts: + - path: /tmp + etc-ssl: + type: hostPath + hostPath: /etc/ssl + hostPathType: DirectoryOrCreate + globalMounts: + - path: /etc/ssl + readOnly: true + etc-pki: + type: hostPath + hostPath: /etc/pki + hostPathType: DirectoryOrCreate + globalMounts: + - path: /etc/pki + readOnly: true + etc-ca-certificates: + type: hostPath + hostPath: /etc/ca-certificates + hostPathType: DirectoryOrCreate + globalMounts: + - path: /etc/ca-certificates + readOnly: true diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 new file mode 100644 index 00000000..74d66703 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # renovate: datasource=github-releases depName=rancher/system-upgrade-controller + - https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.2/crd.yaml + - helmrelease.yaml + - rbac.yaml diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 
b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 new file mode 100644 index 00000000..123677c2 --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system-upgrade +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: system-upgrade + namespace: system-upgrade diff --git a/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 new file mode 100644 index 00000000..7fe74b4a --- /dev/null +++ b/bootstrap/templates/kubernetes/apps/system-upgrade/system-upgrade-controller/ks.yaml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app system-upgrade-controller + namespace: flux-system +spec: + targetNamespace: system-upgrade + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/apps/system-upgrade/system-upgrade-controller/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + retryInterval: 1m + timeout: 5m diff --git a/bootstrap/templates/kubernetes/bootstrap/flux/github-deploy-key.sops.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/flux/github-deploy-key.sops.yaml.j2 new file mode 100644 index 00000000..0ef1f6e8 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/flux/github-deploy-key.sops.yaml.j2 @@ -0,0 +1,17 @@ +#% if bootstrap_github_private_key %# +--- +apiVersion: v1 +kind: Secret +metadata: + name: github-deploy-key + namespace: flux-system +stringData: + identity: | + #% filter indent(width=4, first=False) %# + #{ bootstrap_github_private_key }# + #%- endfilter %# + known_hosts: | + github.com 
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk= +#% endif %# diff --git a/bootstrap/templates/kubernetes/bootstrap/flux/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/flux/kustomization.yaml.j2 new file mode 100644 index 00000000..02af98f0 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/flux/kustomization.yaml.j2 @@ -0,0 +1,61 @@ +# IMPORTANT: This file is not tracked by flux and should never be. Its +# purpose is to only install the Flux components and CRDs into your cluster. 
+--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - github.com/fluxcd/flux2/manifests/install?ref=v2.2.3 +patches: + # Remove the default network policies + - patch: |- + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + kind: NetworkPolicy + # Resources renamed to match those installed by oci://ghcr.io/fluxcd/flux-manifests + - target: + kind: ResourceQuota + name: critical-pods + patch: | + - op: replace + path: /metadata/name + value: critical-pods-flux-system + - target: + kind: ClusterRoleBinding + name: cluster-reconciler + patch: | + - op: replace + path: /metadata/name + value: cluster-reconciler-flux-system + - target: + kind: ClusterRoleBinding + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: flux-edit + patch: | + - op: replace + path: /metadata/name + value: flux-edit-flux-system + - target: + kind: ClusterRole + name: flux-view + patch: | + - op: replace + path: /metadata/name + value: flux-view-flux-system diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/.mjfilter.py b/bootstrap/templates/kubernetes/bootstrap/talos/.mjfilter.py new file mode 100644 index 00000000..3ace63df --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/.mjfilter.py @@ -0,0 +1 @@ +main = lambda data: data.get("bootstrap_distribution", "k3s") in ["talos"] diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/apps/cilium-values.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/apps/cilium-values.yaml.j2 new file mode 100644 index 00000000..ecaa0917 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/apps/cilium-values.yaml.j2 @@ -0,0 +1,4 @@ +--- +#% filter 
indent(width=0, first=True) %# +#% include 'partials/cilium-values-init.partial.yaml.j2' %# +#% endfilter %# diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/apps/helmfile.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/apps/helmfile.yaml.j2 new file mode 100644 index 00000000..bea96763 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/apps/helmfile.yaml.j2 @@ -0,0 +1,22 @@ +--- +repositories: + - name: cilium + url: https://helm.cilium.io + - name: postfinance + url: https://postfinance.github.io/kubelet-csr-approver + +releases: + - name: cilium + namespace: kube-system + chart: cilium/cilium + version: 1.15.1 + wait: true + values: + - ./cilium-values.yaml + - name: kubelet-csr-approver + namespace: kube-system + chart: postfinance/kubelet-csr-approver + version: 1.0.7 + wait: true + values: + - ./kubelet-csr-approver-values.yaml diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml.j2 new file mode 100644 index 00000000..d63b9845 --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/apps/kubelet-csr-approver-values.yaml.j2 @@ -0,0 +1,4 @@ +--- +#% filter indent(width=0, first=True) %# +#% include 'partials/kubelet-csr-approver-values.partial.yaml.j2' %# +#% endfilter %# diff --git a/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 b/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 new file mode 100644 index 00000000..bf52d9ae --- /dev/null +++ b/bootstrap/templates/kubernetes/bootstrap/talos/talconfig.yaml.j2 @@ -0,0 +1,244 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/budimanjojo/talhelper/master/pkg/config/schemas/talconfig.json +--- +# renovate: datasource=docker depName=ghcr.io/siderolabs/installer +talosVersion: v1.6.5 +# renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet +kubernetesVersion: v1.29.2 + 
+clusterName: &cluster home-kubernetes +endpoint: https://#{ bootstrap_controllers_vip }#:6443 +clusterPodNets: + - "#{ bootstrap_pod_network.split(',')[0] }#" +clusterSvcNets: + - "#{ bootstrap_service_network.split(',')[0] }#" +additionalApiServerCertSans: &sans + - "#{ bootstrap_controllers_vip }#" + - 127.0.0.1 # KubePrism + #% for item in bootstrap_tls_sans %# + - "#{ item }#" + #% endfor %# +additionalMachineCertSans: *sans +cniConfig: + name: none + +nodes: + #% for item in bootstrap_node_inventory %# + - hostname: "#{ item.name }#" + ipAddress: "#{ item.address }#" + #% if item.talos_disk.startswith('/') %# + installDisk: "#{ item.talos_disk }#" + #% else %# + installDiskSelector: + serial: "#{ item.talos_disk }#" + #% endif %# + #% if bootstrap_talos.secureboot.enabled %# + machineSpec: + secureboot: true + talosImageURL: factory.talos.dev/installer-secureboot/#{ bootstrap_talos.schematic_id }# + #% else %# + talosImageURL: factory.talos.dev/installer/#{ bootstrap_talos.schematic_id }# + #% endif %# + controlPlane: #{ (item.controller) | string | lower }# + networkInterfaces: + - interface: eth0 + dhcp: false + #% if bootstrap_talos.vlan %# + vlans: + - vlanId: #{ bootstrap_talos.vlan }# + addresses: + - "#{ item.address }#/#{ bootstrap_node_network.split('/') | last }#" + mtu: 1500 + routes: + - network: 0.0.0.0/0 + #% if bootstrap_node_default_gateway %# + gateway: "#{ bootstrap_node_default_gateway }#" + #% else %# + gateway: "#{ bootstrap_node_network | nthhost(1) }#" + #% endif %# + #% if item.controller %# + vip: + ip: "#{ bootstrap_controllers_vip }#" + #% endif %# + #% else %# + addresses: + - "#{ item.address }#/#{ bootstrap_node_network.split('/') | last }#" + mtu: 1500 + routes: + - network: 0.0.0.0/0 + #% if bootstrap_node_default_gateway %# + gateway: "#{ bootstrap_node_default_gateway }#" + #% else %# + gateway: "#{ bootstrap_node_network | nthhost(1) }#" + #% endif %# + #% if item.controller %# + vip: + ip: "#{ bootstrap_controllers_vip }#" 
+ #% endif %# + #% endif %# + #% if bootstrap_talos.user_patches %# + patches: + - "@./patches/node_#{ item.name }#.yaml" + #% endif %# + #% endfor %# + +patches: + # Configure containerd + - |- + machine: + files: + - op: create + path: /etc/cri/conf.d/20-customization.part + content: |- + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + enable_unprivileged_icmp = true + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + discard_unpacked_layers = false + + # Disable search domain everywhere + - |- + machine: + network: + disableSearchDomain: true + + # Enable cluster discovery + - |- + cluster: + discovery: + registries: + kubernetes: + disabled: false + service: + disabled: false + + # Configure kubelet + - |- + machine: + kubelet: + extraArgs: + image-gc-low-threshold: 50 + image-gc-high-threshold: 55 + rotate-server-certificates: true + nodeIP: + validSubnets: + - "#{ bootstrap_node_network }#" + + # Force nameserver + - |- + machine: + network: + nameservers: + #% for item in bootstrap_dns_servers | default(['1.1.1.1', '1.0.0.1']) %# + - #{ item }# + #% endfor %# + + # Configure NTP + - |- + machine: + time: + disabled: false + servers: + - time.cloudflare.com + + # Custom sysctl settings + - |- + machine: + sysctls: + fs.inotify.max_queued_events: 65536 + fs.inotify.max_user_watches: 524288 + fs.inotify.max_user_instances: 8192 + + # Mount openebs-hostpath in kubelet + - |- + machine: + kubelet: + extraMounts: + - destination: /var/openebs/local + type: bind + source: /var/openebs/local + options: + - bind + - rshared + - rw + + # Disable predictable NIC naming + - |- + machine: + install: + extraKernelArgs: + - net.ifnames=0 + + #% if bootstrap_talos.secureboot.enabled and bootstrap_talos.secureboot.encrypt_disk_with_tpm %# + # Encrypt system disk with TPM + - |- + machine: + systemDiskEncryption: + ephemeral: + provider: luks2 + keys: 
+ - slot: 0 + tpm: {} + state: + provider: luks2 + keys: + - slot: 0 + tpm: {} + #% endif %# + #% if bootstrap_talos.user_patches %# + # User specified global patches + - "@./patches/global.yaml" + #% endif %# + +controlPlane: + patches: + # Cluster configuration + - |- + cluster: + allowSchedulingOnMasters: true + controllerManager: + extraArgs: + bind-address: 0.0.0.0 + proxy: + disabled: true + scheduler: + extraArgs: + bind-address: 0.0.0.0 + + # ETCD configuration + - |- + cluster: + etcd: + extraArgs: + listen-metrics-urls: http://0.0.0.0:2381 + advertisedSubnets: + - "#{ bootstrap_node_network }#" + + # Disable default API server admission plugins. + - |- + - op: remove + path: /cluster/apiServer/admissionControl + + # Enable K8s Talos API Access + - |- + machine: + features: + kubernetesTalosAPIAccess: + enabled: true + allowedRoles: + - os:admin + allowedKubernetesNamespaces: + - system-upgrade + + #% if bootstrap_talos.user_patches %# + # User specified controlPlane patches + - "@./patches/controlPlane.yaml" + #% endif %# + +#% if ((bootstrap_talos.user_patches) and (bootstrap_node_inventory | selectattr('controller', 'equalto', False) | list | length)) %# +worker: + patches: + # User specified worker patches + - "@./patches/worker.yaml" +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/apps.yaml.j2 b/bootstrap/templates/kubernetes/flux/apps.yaml.j2 new file mode 100644 index 00000000..2284be62 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/apps.yaml.j2 @@ -0,0 +1,56 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/apps + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets + - kind: ConfigMap + name: 
cluster-settings-user + optional: true + - kind: Secret + name: cluster-secrets-user + optional: true + patches: + - patch: |- + apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + metadata: + name: not-used + spec: + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets + - kind: ConfigMap + name: cluster-settings-user + optional: true + - kind: Secret + name: cluster-secrets-user + optional: true + target: + group: kustomize.toolkit.fluxcd.io + kind: Kustomization + labelSelector: substitution.flux.home.arpa/disabled notin (true) diff --git a/bootstrap/templates/kubernetes/flux/config/cluster.yaml.j2 b/bootstrap/templates/kubernetes/flux/config/cluster.yaml.j2 new file mode 100644 index 00000000..bae21e83 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/config/cluster.yaml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: home-kubernetes + namespace: flux-system +spec: + interval: 30m + url: "#{ bootstrap_github_address }#" + #% if bootstrap_github_private_key %# + secretRef: + name: github-deploy-key + #% endif %# + ref: + branch: "#{ bootstrap_github_branch|default('main', true) }#" + ignore: | + # exclude all + /* + # include kubernetes directory + !/kubernetes +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/flux + prune: true + wait: false + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: ConfigMap + name: cluster-settings + - kind: Secret + name: cluster-secrets diff --git a/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 b/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 new file mode 100644 index 00000000..6d454860 --- 
/dev/null +++ b/bootstrap/templates/kubernetes/flux/config/flux.yaml.j2 @@ -0,0 +1,86 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: flux-manifests + namespace: flux-system +spec: + interval: 10m + url: oci://ghcr.io/fluxcd/flux-manifests + ref: + tag: v2.2.3 +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: flux + namespace: flux-system +spec: + interval: 10m + path: ./ + prune: true + wait: true + sourceRef: + kind: OCIRepository + name: flux-manifests + patches: + # Remove the network policies that does not work with k3s + - patch: | + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + target: + group: networking.k8s.io + kind: NetworkPolicy + # Increase the number of reconciliations that can be performed in parallel and bump the resources limits + # https://fluxcd.io/flux/cheatsheets/bootstrap/#increase-the-number-of-workers + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=8 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --kube-api-qps=500 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --kube-api-burst=1000 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --requeue-dependency=5s + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: not-used + spec: + template: + spec: + containers: + - name: manager + resources: + limits: + cpu: 2000m + memory: 2Gi + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + # Enable Helm near OOM detection + # https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-helm-near-oom-detection + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --feature-gates=OOMWatch=true + - op: add + path: 
/spec/template/spec/containers/0/args/- + value: --oom-watch-memory-threshold=95 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-interval=500ms + target: + kind: Deployment + name: helm-controller diff --git a/bootstrap/templates/kubernetes/flux/config/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/config/kustomization.yaml.j2 new file mode 100644 index 00000000..ef231746 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/config/kustomization.yaml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./flux.yaml + - ./cluster.yaml diff --git a/bootstrap/templates/kubernetes/flux/repositories/git/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/git/kustomization.yaml.j2 new file mode 100644 index 00000000..fe0f332a --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/git/kustomization.yaml.j2 @@ -0,0 +1,4 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: [] diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/bjw-s.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/bjw-s.yaml.j2 new file mode 100644 index 00000000..df0c6474 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/bjw-s.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: bjw-s + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/bjw-s/helm diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/cilium.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/cilium.yaml.j2 new file mode 100644 index 00000000..51c65d69 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/cilium.yaml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: cilium + namespace: flux-system +spec: + interval: 1h + url: 
https://helm.cilium.io diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/external-dns.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/external-dns.yaml.j2 new file mode 100644 index 00000000..78812e54 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/external-dns.yaml.j2 @@ -0,0 +1,11 @@ +#% if bootstrap_cloudflare.enabled %# +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: external-dns + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes-sigs.github.io/external-dns +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/ingress-nginx.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/ingress-nginx.yaml.j2 new file mode 100644 index 00000000..2a991f9e --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/ingress-nginx.yaml.j2 @@ -0,0 +1,11 @@ +#% if bootstrap_cloudflare.enabled %# +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: ingress-nginx + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes.github.io/ingress-nginx +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/jetstack.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/jetstack.yaml.j2 new file mode 100644 index 00000000..1b4982d6 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/jetstack.yaml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: jetstack + namespace: flux-system +spec: + interval: 1h + url: https://charts.jetstack.io diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/k8s-gateway.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/k8s-gateway.yaml.j2 new file mode 100644 index 00000000..d55abf4d --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/k8s-gateway.yaml.j2 @@ -0,0 +1,11 @@ 
+#% if bootstrap_cloudflare.enabled %# +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: k8s-gateway + namespace: flux-system +spec: + interval: 1h + url: https://ori-edge.github.io/k8s_gateway +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 new file mode 100644 index 00000000..706bc8cd --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/kustomization.yaml.j2 @@ -0,0 +1,21 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./bjw-s.yaml + - ./cilium.yaml + #% if bootstrap_cloudflare.enabled %# + - ./external-dns.yaml + - ./ingress-nginx.yaml + - ./k8s-gateway.yaml + #% endif %# + - ./jetstack.yaml + - ./metrics-server.yaml + - ./openebs.yaml + #% if bootstrap_distribution in ["talos"] %# + - ./postfinance.yaml + #% endif %# + - ./stakater.yaml + #% if bootstrap_distribution in ["talos"] %# + - ./xenitab.yaml + #% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/metrics-server.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/metrics-server.yaml.j2 new file mode 100644 index 00000000..57e7aa0c --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/metrics-server.yaml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: metrics-server + namespace: flux-system +spec: + interval: 1h + url: https://kubernetes-sigs.github.io/metrics-server diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/openebs.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/openebs.yaml.j2 new file mode 100644 index 00000000..d0f105e6 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/openebs.yaml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + 
name: openebs + namespace: flux-system +spec: + interval: 1h + url: https://openebs.github.io/charts diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 new file mode 100644 index 00000000..bb917225 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/postfinance.yaml.j2 @@ -0,0 +1,11 @@ +#% if bootstrap_distribution in ["talos"] %# +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: postfinance + namespace: flux-system +spec: + interval: 1h + url: https://postfinance.github.io/kubelet-csr-approver +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/stakater.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/stakater.yaml.j2 new file mode 100644 index 00000000..1846e8ae --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/stakater.yaml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: stakater + namespace: flux-system +spec: + interval: 1h + url: https://stakater.github.io/stakater-charts diff --git a/bootstrap/templates/kubernetes/flux/repositories/helm/xenitab.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/helm/xenitab.yaml.j2 new file mode 100644 index 00000000..83f7ddac --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/helm/xenitab.yaml.j2 @@ -0,0 +1,12 @@ +#% if bootstrap_distribution in ["talos"] %# +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: xenitab + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/xenitab/helm-charts +#% endif %# diff --git a/bootstrap/templates/kubernetes/flux/repositories/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/kustomization.yaml.j2 new file mode 100644 index 00000000..d158d426 --- /dev/null +++ 
b/bootstrap/templates/kubernetes/flux/repositories/kustomization.yaml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./git + - ./helm + - ./oci diff --git a/bootstrap/templates/kubernetes/flux/repositories/oci/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/repositories/oci/kustomization.yaml.j2 new file mode 100644 index 00000000..fe0f332a --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/repositories/oci/kustomization.yaml.j2 @@ -0,0 +1,4 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: [] diff --git a/bootstrap/templates/kubernetes/flux/vars/cluster-secrets.sops.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/cluster-secrets.sops.yaml.j2 new file mode 100644 index 00000000..71a496d7 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/vars/cluster-secrets.sops.yaml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: cluster-secrets + namespace: flux-system +stringData: + SECRET_EXAMPLE: Neque porro quisquam est qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit... 
+ #% if bootstrap_cloudflare.enabled %# + SECRET_DOMAIN: "#{ bootstrap_cloudflare.domain }#" + SECRET_ACME_EMAIL: "#{ bootstrap_cloudflare.acme.email }#" + SECRET_CLOUDFLARE_TUNNEL_ID: "#{ bootstrap_cloudflare.tunnel.id }#" + #% endif %# diff --git a/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 new file mode 100644 index 00000000..f176c7f5 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/vars/cluster-settings.yaml.j2 @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-settings + namespace: flux-system +data: + TIMEZONE: "#{ bootstrap_timezone }#" + CLUSTER_CIDR: "#{ bootstrap_pod_network.split(',')[0] }#" + NODE_CIDR: "#{ bootstrap_node_network }#" + #% if bootstrap_feature_gates.dual_stack_ipv4_first %# + CLUSTER_CIDR_V6: "#{ bootstrap_pod_network.split(',')[1] }#" + #% endif %# + #% if bootstrap_bgp.enabled %# + BGP_ADVERTISED_CIDR: "#{ bootstrap_bgp.advertised_network }#" + #% endif %# diff --git a/bootstrap/templates/kubernetes/flux/vars/kustomization.yaml.j2 b/bootstrap/templates/kubernetes/flux/vars/kustomization.yaml.j2 new file mode 100644 index 00000000..8db2fe91 --- /dev/null +++ b/bootstrap/templates/kubernetes/flux/vars/kustomization.yaml.j2 @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./cluster-settings.yaml + - ./cluster-secrets.sops.yaml diff --git a/bootstrap/templates/partials/cilium-values-full.partial.yaml.j2 b/bootstrap/templates/partials/cilium-values-full.partial.yaml.j2 new file mode 100644 index 00000000..111b031b --- /dev/null +++ b/bootstrap/templates/partials/cilium-values-full.partial.yaml.j2 @@ -0,0 +1,128 @@ +autoDirectNodeRoutes: true +bgpControlPlane: + enabled: true +bpf: + masquerade: false +cgroup: + automount: + enabled: false + hostRoot: /sys/fs/cgroup +cluster: + id: 1 + name: home-kubernetes +containerRuntime: + integration: containerd + #% if 
bootstrap_distribution in ["k3s"] %# + socketPath: /var/run/k3s/containerd/containerd.sock + #% endif %# +# NOTE: This might need to be set if you have more than one active NIC on your hosts +# devices: +# - eno0 +endpointRoutes: + enabled: true +#% if bootstrap_cloudflare.enabled %# +hubble: + enabled: true + metrics: + enabled: + - dns:query + - drop + - tcp + - flow + - port-distribution + - icmp + - http + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium + relay: + enabled: true + rollOutPods: true + prometheus: + serviceMonitor: + enabled: true + ui: + enabled: true + rollOutPods: true + ingress: + enabled: true + className: internal + hosts: + - "hubble.${SECRET_DOMAIN}" + tls: + - hosts: + - "hubble.${SECRET_DOMAIN}" +#% else %# +hubble: + enabled: false +#% endif %# +ipam: + mode: kubernetes +ipv4NativeRoutingCIDR: "${CLUSTER_CIDR}" +#% if bootstrap_feature_gates.dual_stack_ipv4_first %# +ipv6NativeRoutingCIDR: "${CLUSTER_CIDR_V6}" +ipv6: + enabled: true +#% endif %# +#% if bootstrap_distribution in ["k3s"] %# +k8sServiceHost: 127.0.0.1 +k8sServicePort: 6444 +#% elif bootstrap_distribution in ["talos"] %# +k8sServiceHost: 127.0.0.1 +k8sServicePort: 7445 +#% endif %# +kubeProxyReplacement: true +kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 +l2announcements: + #% if ((bootstrap_bgp.enabled) or (bootstrap_feature_gates.dual_stack_ipv4_first)) %# + enabled: false # https://github.com/cilium/cilium/issues/28985 + #% else %# + enabled: true + #% endif %# +loadBalancer: + algorithm: maglev + mode: dsr +localRedirectPolicy: true +operator: + replicas: 1 + rollOutPods: true + prometheus: + enabled: true + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium +prometheus: + enabled: true + serviceMonitor: + enabled: true + trustCRDsExist: true +dashboards: + enabled: true + annotations: + grafana_folder: Cilium +rollOutCiliumPods: true +routingMode: native 
+securityContext: + capabilities: + ciliumAgent: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + cleanCiliumState: + - NET_ADMIN + - SYS_ADMIN + - SYS_RESOURCE diff --git a/bootstrap/templates/partials/cilium-values-init.partial.yaml.j2 b/bootstrap/templates/partials/cilium-values-init.partial.yaml.j2 new file mode 100644 index 00000000..8ec51ebf --- /dev/null +++ b/bootstrap/templates/partials/cilium-values-init.partial.yaml.j2 @@ -0,0 +1,74 @@ +autoDirectNodeRoutes: true +bgpControlPlane: + enabled: true +bpf: + masquerade: false +cgroup: + automount: + enabled: false + hostRoot: /sys/fs/cgroup +cluster: + id: 1 + name: home-kubernetes +containerRuntime: + integration: containerd + #% if bootstrap_distribution in ["k3s"] %# + socketPath: /var/run/k3s/containerd/containerd.sock + #% endif %# +# NOTE: This might need to be set if you have more than one active NIC on your hosts +# devices: +# - eno0 +endpointRoutes: + enabled: true +hubble: + enabled: false +ipam: + mode: kubernetes +ipv4NativeRoutingCIDR: "#{ bootstrap_pod_network }#" +#% if bootstrap_feature_gates.dual_stack_ipv4_first %# +ipv6NativeRoutingCIDR: "#{ bootstrap_pod_network_v6 }#" +ipv6: + enabled: true +#% endif %# +#% if bootstrap_distribution in ["k3s"] %# +k8sServiceHost: 127.0.0.1 +k8sServicePort: 6444 +#% elif bootstrap_distribution in ["talos"] %# +k8sServiceHost: 127.0.0.1 +k8sServicePort: 7445 +#% endif %# +kubeProxyReplacement: true +kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 +l2announcements: + #% if ((bootstrap_bgp.enabled) or (bootstrap_feature_gates.dual_stack_ipv4_first)) %# + enabled: false # https://github.com/cilium/cilium/issues/28985 + #% else %# + enabled: true + #% endif %# +loadBalancer: + algorithm: maglev + mode: dsr +localRedirectPolicy: true +operator: + replicas: 1 + rollOutPods: true +rollOutCiliumPods: true +routingMode: native +securityContext: + capabilities: + 
ciliumAgent: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + cleanCiliumState: + - NET_ADMIN + - SYS_ADMIN + - SYS_RESOURCE diff --git a/bootstrap/templates/partials/kube-vip-ds.partial.yaml.j2 b/bootstrap/templates/partials/kube-vip-ds.partial.yaml.j2 new file mode 100644 index 00000000..6c9475be --- /dev/null +++ b/bootstrap/templates/partials/kube-vip-ds.partial.yaml.j2 @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-vip + namespace: kube-system + labels: + app.kubernetes.io/name: kube-vip +spec: + selector: + matchLabels: + app.kubernetes.io/name: kube-vip + template: + metadata: + labels: + app.kubernetes.io/name: kube-vip + spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v0.6.4 + imagePullPolicy: IfNotPresent + args: ["manager"] + env: + - name: address + value: "#{ bootstrap_controllers_vip }#" + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: prometheus_server + value: :2112 + securityContext: + capabilities: + add: ["NET_ADMIN", "NET_RAW", "SYS_TIME"] + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + serviceAccountName: kube-vip + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists diff --git 
a/bootstrap/templates/partials/kube-vip-rbac.partial.yaml.j2 b/bootstrap/templates/partials/kube-vip-rbac.partial.yaml.j2 new file mode 100644 index 00000000..d6ecc936 --- /dev/null +++ b/bootstrap/templates/partials/kube-vip-rbac.partial.yaml.j2 @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-vip + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:kube-vip-role +rules: + - apiGroups: [""] + resources: ["services/status"] + verbs: ["update"] + - apiGroups: [""] + resources: ["services", "endpoints"] + verbs: ["list","get","watch", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list","get","watch", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["list", "get", "watch", "update", "create"] + - apiGroups: ["discovery.k8s.io"] + resources: ["endpointslices"] + verbs: ["list","get","watch", "update"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-vip-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kube-vip-role +subjects: +- kind: ServiceAccount + name: kube-vip + namespace: kube-system diff --git a/bootstrap/templates/partials/kubelet-csr-approver-values.partial.yaml.j2 b/bootstrap/templates/partials/kubelet-csr-approver-values.partial.yaml.j2 new file mode 100644 index 00000000..0bf92493 --- /dev/null +++ b/bootstrap/templates/partials/kubelet-csr-approver-values.partial.yaml.j2 @@ -0,0 +1,2 @@ +providerRegex: ^(#{ (bootstrap_node_inventory | map(attribute='name') | join('|')) }#)$ +bypassDnsResolution: true diff --git a/config.sample.yaml b/config.sample.yaml new file mode 100644 index 00000000..ec3b4c7d --- /dev/null +++ b/config.sample.yaml @@ -0,0 +1,207 @@ +--- + +# +# 1. 
(Required) Cluster details - Cluster represents the Kubernetes cluster layer and any additional customizations +# + +# (Required) Timezone is your IANA formatted timezone (e.g. America/New_York) +bootstrap_timezone: "" + +# (Required) Distribution can either be k3s or talos +bootstrap_distribution: k3s + +# (Required: Talos) Talos Specific Options +bootstrap_talos: + # (Required: Talos) If you need any additional System Extensions, and/or add kernel arguments generate a schematic ID. + # Go to https://factory.talos.dev/ and choose the System Extensions, and/or add kernel arguments. + schematic_id: "" + # (Optional: Talos) Add vlan tag to network master device + # See: https://www.talos.dev/latest/advanced/advanced-networking/#vlans + vlan: "" + # (Optional: Talos) Secureboot and TPM-based disk encryption + secureboot: + # (Optional) Enable secureboot on UEFI systems. Not supported on x86 platforms in BIOS mode. + # See: https://www.talos.dev/latest/talos-guides/install/bare-metal-platforms/secureboot + enabled: false + # (Optional) Enable TPM-based disk encryption. Requires TPM 2.0 + # See: https://www.talos.dev/v1.6/talos-guides/install/bare-metal-platforms/secureboot/#disk-encryption-with-tpm + encrypt_disk_with_tpm: false + # (Optional) Add includes for user provided patches to generated talconfig.yaml. + # See: https://github.com/budimanjojo/talhelper/blob/179ba9ed42f70069c7842109bea24f769f7af6eb/example/extraKernelArgs-patch.yaml + # Patches are applied in this order. (global overrides cp/worker which overrides node-specific). + # Create these files to allow talos:bootstrap-genconfig to complete (empty files are ok). 
+ # kubernetes/bootstrap/talos/patches/node_<node-name>.yaml # Patches for individual nodes + # kubernetes/bootstrap/talos/patches/controlPlane.yaml # Patches for controlplane nodes + # kubernetes/bootstrap/talos/patches/worker.yaml # Patches for worker nodes + # kubernetes/bootstrap/talos/patches/global.yaml # Patches for ALL nodes + user_patches: false + +# (Required) The CIDR your nodes are on (e.g. 192.168.1.0/24) +bootstrap_node_network: "" + +# (Optional) The default gateway for the nodes +# Default is .1 derived from bootstrap_node_network: 'x.x.x.1' +bootstrap_node_default_gateway: "" + +# (Required) Use only 1, 3 or more ODD number of controller nodes, recommended is 3 +# Worker nodes are optional +bootstrap_node_inventory: [] + # - name: "" # Name of the node (must match [a-z0-9-\.]+) + # address: "" # IP address of the node + # controller: true # (Required) Set to true if this is a controller node + # ssh_user: "" # (Required: k3s) SSH username of the node + # talos_disk: "" # (Required: Talos) Device path or serial number of the disk for this node + # ... + +# (Optional) The DNS server to use for the cluster, this can be an existing +# local DNS server or a public one. +# Default is ["1.1.1.1", "1.0.0.1"] +# If using a local DNS server make sure it meets the following requirements: +# 1. your nodes can reach it +# 2. it is configured to forward requests to a public DNS server +# 3. you are not force redirecting DNS requests to it - this will break cert generation over DNS01 +# If using multiple DNS servers make sure they are set up the same way, there is no +# guarantee that the first DNS server will always be used for every lookup. +bootstrap_dns_servers: [] + +# (Optional) The DNS search domain to use for the nodes. +# Default is "." +# Use the default or leave empty to avoid possible DNS issues inside the cluster. 
+bootstrap_search_domain: "" + +# (Required) The pod CIDR for the cluster, this must NOT overlap with any +# existing networks and is usually a /16 (64K IPs). +# If you want to use IPv6 check the advanced flags below +bootstrap_pod_network: "10.69.0.0/16" + +# (Required) The service CIDR for the cluster, this must NOT overlap with any +# existing networks and is usually a /16 (64K IPs). +# If you want to use IPv6 check the advanced flags below +bootstrap_service_network: "10.96.0.0/16" + +# (Required) The IP address of the Kube API, choose an available IP in +# your nodes host network that is NOT being used. This is announced over L2. +# For k3s kube-vip is used, built-in functionality is used with Talos +bootstrap_controllers_vip: "" + +# (Optional) Add additional SANs to the Kube API cert, this is useful +# if you want to call the Kube API by hostname rather than IP +bootstrap_tls_sans: [] + +# (Required) Age Public Key (e.g. age1...) +# 1. Generate a new key with the following command: +# > task sops:age-keygen +# 2. Copy the public key and paste it below +bootstrap_sops_age_pubkey: "" + +# (Optional) Use cilium BGP control plane when L2 announcements won't traverse VLAN network segments. +# Needs a BGP capable router setup with the node IPs as peers. +# See: https://docs.cilium.io/en/latest/network/bgp-control-plane/ +bootstrap_bgp: + enabled: false + # (Optional) If using multiple BGP peers add them here. + # Default is .1 derived from bootstrap_node_network: ['x.x.x.1'] + peers: [] + # (Required) Set the BGP Autonomous System Number for the router(s) and nodes. + # If these match, iBGP will be used. If not, eBGP will be used. + peer_asn: "" # Router(s) AS + local_asn: "" # Node(s) AS + # (Required) The advertised CIDR for the cluster, this must NOT overlap with any + # existing networks and is usually a /16 (64K IPs). + # If you want to use IPv6 check the advanced flags below + advertised_network: "" + +# +# 2. 
(Required) Flux details - Flux is used to manage the cluster configuration. +# + +# (Required) GitHub repository URL (for private repos use the ssh:// URL) +bootstrap_github_address: "" + +# (Required) GitHub repository branch +bootstrap_github_branch: "main" + +# (Required) Token for GitHub push-based sync +# 1. Generate a new token with the following command: +# > openssl rand -hex 16 +# 2. Copy the token and paste it below +bootstrap_github_webhook_token: "" + +# (Optional) Private key for Flux to access the GitHub repository +# 1. Generate a new key with the following command: +# > ssh-keygen -t ecdsa -b 521 -C "github-deploy-key" -f github-deploy.key -q -P "" +# 2. Make sure to paste public key from "github-deploy.key.pub" into +# the deploy keys section of your repository settings. +# 3. Uncomment and paste the private key below +# 4. Optionally set your repository on GitHub to private +# bootstrap_github_private_key: | +# -----BEGIN OPENSSH PRIVATE KEY----- +# ... +# -----END OPENSSH PRIVATE KEY----- + +# +# 3. (Optional) Cloudflare details - Cloudflare is used for DNS, TLS certificates and tunneling. +# + +bootstrap_cloudflare: + # (Required) Disable to use a different DNS provider + enabled: false + # (Required) Cloudflare Domain + domain: "" + # (Required) Cloudflare API Token (NOT API Key) + # 1. Head over to Cloudflare and create a API Token by going to + # https://dash.cloudflare.com/profile/api-tokens + # 2. Under the `API Tokens` section click the blue `Create Token` button. + # 3. Click the blue `Use template` button for the `Edit zone DNS` template. + # 4. Name your token something like `home-kubernetes` + # 5. Under `Permissions`, click `+ Add More` and add each permission below: + # `Zone - DNS - Edit` + # `Account - Cloudflare Tunnel - Read` + # 6. Limit the permissions to a specific account and zone resources. + # 7. Click the blue `Continue to Summary` button and then the blue `Create Token` button. + # 8. Copy the token and paste it below. 
+ token: "" + # (Required) Options for Cloudflare ACME + acme: + # (Required) Any email you want to be associated with the ACME account (used for TLS certs via letsencrypt.org) + email: "" + # (Required) Use the ACME production server when requesting the wildcard certificate. + # By default the ACME staging server is used. This is to prevent being rate-limited. + # Update this option to `true` when you have verified the staging certificate + # works and then re-run `task configure` and push your changes to GitHub. + production: false + # (Required) Provide LAN access to the cluster ingresses for internal ingress classes + # The Load balancer IP for internal ingress, choose an available IP + # in your nodes host network that is NOT being used. This is announced over L2. + ingress_vip: "" + # (Required) Gateway is used for providing DNS to your cluster on LAN + # The Load balancer IP for k8s_gateway, choose an available IP + # in your nodes host network that is NOT being used. This is announced over L2. + gateway_vip: "" + # (Required) Options for Cloudflare Tunnel + # 1. Authenticate cloudflared to your domain + # > cloudflared tunnel login + # 2. Create the tunnel + # > cloudflared tunnel create k8s + # 3. Copy the AccountTag, TunnelID, and TunnelSecret from the tunnel configuration file and paste them below + tunnel: + # (Required) Cloudflare Account ID (cat ~/.cloudflared/*.json | jq -r .AccountTag) + account_id: "" + # (Required) Cloudflared Tunnel ID (cat ~/.cloudflared/*.json | jq -r .TunnelID) + id: "" + # (Required) Cloudflared Tunnel Secret (cat ~/.cloudflared/*.json | jq -r .TunnelSecret) + secret: "" + # (Required) Provide WAN access to the cluster ingresses for external ingress classes + # The Load balancer IP for external ingress, choose an available IP + # in your nodes host network that is NOT being used. This is announced over L2. 
+ ingress_vip: "" + +# (Optional) Feature gates are used to enable experimental features +# bootstrap_feature_gates: +# # Enable Dual Stack IPv4 first +# # IMPORTANT: I am looking for people to help maintain IPv6 support since I cannot test it. +# # Ref: https://github.com/onedr0p/cluster-template/issues/1148 +# # Keep in mind that Cilium does not currently support IPv6 L2 announcements. +# # Make sure you set cluster.pod_cidr and cluster.service_cidr +# # to a valid dual stack CIDRs, e.g. "10.42.0.0/16,fd00:10:244::/64" +# dual_stack_ipv4_first: false diff --git a/makejinja.toml b/makejinja.toml new file mode 100644 index 00000000..52845a37 --- /dev/null +++ b/makejinja.toml @@ -0,0 +1,18 @@ +[makejinja] +inputs = ["./bootstrap/overrides","./bootstrap/templates"] +output = "./" +exclude_patterns = [".mjfilter.py", "*.partial.yaml.j2"] +data = ["./config.yaml"] +import_paths = ["./bootstrap/scripts"] +loaders = ["plugin:Plugin"] +jinja_suffix = ".j2" +force = true +undefined = "chainable" + +[makejinja.delimiter] +block_start = "#%" +block_end = "%#" +comment_start = "#|" +comment_end = "#|" +variable_start = "#{" +variable_end = "}#" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..bc07cd21 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +bcrypt==4.1.2 +cloudflare==2.19.2 +email-validator==2.1.0.post1 +makejinja==2.5.0 +netaddr==1.2.1 +passlib==1.7.4 diff --git a/scripts/kubeconform.sh b/scripts/kubeconform.sh new file mode 100755 index 00000000..a69308b1 --- /dev/null +++ b/scripts/kubeconform.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +set -o errexit +set -o pipefail + +KUBERNETES_DIR=$1 + +[[ -z "${KUBERNETES_DIR}" ]] && echo "Kubernetes location not specified" && exit 1 + +kustomize_args=("--load-restrictor=LoadRestrictionsNone") +kustomize_config="kustomization.yaml" +kubeconform_args=( + "-strict" + "-ignore-missing-schemas" + "-skip" + "Secret" + "-schema-location" + "default" + "-schema-location" + 
"https://kubernetes-schemas.pages.dev/{{.Group}}/{{.ResourceKind}}_{{.ResourceAPIVersion}}.json" + "-verbose" +) + +echo "=== Validating standalone manifests in ${KUBERNETES_DIR}/flux ===" +find "${KUBERNETES_DIR}/flux" -maxdepth 1 -type f -name '*.yaml' -print0 | while IFS= read -r -d $'\0' file; + do + kubeconform "${kubeconform_args[@]}" "${file}" + if [[ ${PIPESTATUS[0]} != 0 ]]; then + exit 1 + fi +done + +echo "=== Validating kustomizations in ${KUBERNETES_DIR}/flux ===" +find "${KUBERNETES_DIR}/flux" -type f -name $kustomize_config -print0 | while IFS= read -r -d $'\0' file; + do + echo "=== Validating kustomizations in ${file/%$kustomize_config} ===" + kustomize build "${file/%$kustomize_config}" "${kustomize_args[@]}" | \ + kubeconform "${kubeconform_args[@]}" + if [[ ${PIPESTATUS[0]} != 0 ]]; then + exit 1 + fi +done + +echo "=== Validating kustomizations in ${KUBERNETES_DIR}/apps ===" +find "${KUBERNETES_DIR}/apps" -type f -name $kustomize_config -print0 | while IFS= read -r -d $'\0' file; + do + echo "=== Validating kustomizations in ${file/%$kustomize_config} ===" + kustomize build "${file/%$kustomize_config}" "${kustomize_args[@]}" | \ + kubeconform "${kubeconform_args[@]}" + if [[ ${PIPESTATUS[0]} != 0 ]]; then + exit 1 + fi +done