---
#
# 1. (Required) Cluster details - Cluster represents the Kubernetes cluster layer and any additional customizations
#
# (Required) Timezone is your IANA formatted timezone (e.g. America/New_York)
bootstrap_timezone: ""
# (Required) Distribution can either be k3s or talos
bootstrap_distribution: k3s
# (Optional) Cluster name; affects Cilium and Talos
bootstrap_cluster_name: ""
# (Required: Talos) Talos Specific Options
bootstrap_talos:
  # (Optional: Talos) Go to https://factory.talos.dev/ and choose any System Extensions and/or kernel arguments you need.
  # Copy the generated schematic id and paste it below.
  # IMPORTANT: The default ID given here means no System Extensions or kernel args will be used.
  schematic_id: "376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba"
  # (Optional: Talos) Add a VLAN tag to the network master device; this is not needed if you tag ports on your switch with the VLAN
  # See: https://www.talos.dev/latest/advanced/advanced-networking/#vlans
  vlan: ""
  # (Optional: Talos) Secure Boot and TPM-based disk encryption
  secureboot:
    # (Optional) Enable Secure Boot on UEFI systems. Not supported on x86 platforms in BIOS mode.
    # See: https://www.talos.dev/latest/talos-guides/install/bare-metal-platforms/secureboot
    enabled: false
    # (Optional) Enable TPM-based disk encryption. Requires TPM 2.0.
    # See: https://www.talos.dev/v1.6/talos-guides/install/bare-metal-platforms/secureboot/#disk-encryption-with-tpm
    encrypt_disk_with_tpm: false
  # (Optional) Add includes for user-provided patches to the generated talconfig.yaml.
  # See: https://github.com/budimanjojo/talhelper/blob/179ba9ed42f70069c7842109bea24f769f7af6eb/example/extraKernelArgs-patch.yaml
  # Patches are applied in this order (global overrides cp/worker, which overrides node-specific).
  # Create these files to allow talos:bootstrap-genconfig to complete (empty files are ok).
  #   kubernetes/bootstrap/talos/patches/node_<name>.yaml  # Patches for individual nodes
  #   kubernetes/bootstrap/talos/patches/controlPlane.yaml # Patches for controlplane nodes
  #   kubernetes/bootstrap/talos/patches/worker.yaml       # Patches for worker nodes
  #   kubernetes/bootstrap/talos/patches/global.yaml       # Patches for ALL nodes
  user_patches: false
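  # A minimal sketch of what one of these patch files might contain, assuming the
  # strategic-merge patch style used in the talhelper example linked above (the kernel
  # argument shown is purely illustrative, not something this template requires):
  #   # kubernetes/bootstrap/talos/patches/global.yaml
  #   machine:
  #     install:
  #       extraKernelArgs:
  #         - net.ifnames=0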
# (Required) The CIDR your nodes are on (e.g. 192.168.1.0/24)
bootstrap_node_network: ""
# (Optional) The default gateway for the nodes
# Default is .1 derived from bootstrap_node_network: 'x.x.x.1'
bootstrap_node_default_gateway: ""
# (Required) Use 1, 3, or another ODD number of controller nodes; 3 is recommended.
# Worker nodes are optional.
bootstrap_node_inventory: []
  # - name: ""          # (Required) Name of the node (must match [a-z0-9-\.]+)
  #   address: ""       # (Required) IP address of the node
  #   controller: true  # (Required) Set to true if this is a controller node
  #   talos_disk: ""    # (Required: Talos) Device path or serial number of the disk for this node (talosctl disks -n <ip> --insecure)
  #   talos_nic: ""     # (Required: Talos) MAC address of the NIC for this node (talosctl get links -n <ip> --insecure)
  #   ssh_user: ""      # (Required: k3s) SSH username of the node
  #   ssh_key: ""       # (Optional: k3s) Set a specific SSH key for this node
  # ...
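  # A minimal sketch of a filled-in Talos inventory (the names, addresses, disk paths
  # and MAC addresses below are hypothetical placeholders):
  # - name: "k8s-0"
  #   address: "192.168.1.10"
  #   controller: true
  #   talos_disk: "/dev/sda"
  #   talos_nic: "aa:bb:cc:dd:ee:00"
  # - name: "k8s-1"
  #   address: "192.168.1.11"
  #   controller: false
  #   talos_disk: "/dev/sda"
  #   talos_nic: "aa:bb:cc:dd:ee:01"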
# (Optional) The DNS server(s) to use for the cluster; this can be an existing
# local DNS server or a public one.
# Default is ["1.1.1.1", "1.0.0.1"]
# If using a local DNS server make sure it meets the following requirements:
#   1. Your nodes can reach it
#   2. It is configured to forward requests to a public DNS server
#   3. You are not force-redirecting DNS requests to it - this will break cert generation over DNS01
# If using multiple DNS servers make sure they are set up the same way; there is no
# guarantee that the first DNS server will always be used for every lookup.
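# For example, a single hypothetical local resolver that forwards to a public upstream:
#   bootstrap_dns_servers: ["192.168.1.53"]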
bootstrap_dns_servers: []
# (Optional) The DNS search domain to use for the nodes.
# Default is "."
# Use the default or leave empty to avoid possible DNS issues inside the cluster.
bootstrap_search_domain: ""
# (Required) The pod CIDR for the cluster; this must NOT overlap with any
# existing networks and is usually a /16 (64K IPs).
# If you want to use IPv6 check the advanced flags below and be aware of
# https://github.com/onedr0p/cluster-template/issues/1148
bootstrap_pod_network: "10.69.0.0/16"
# (Required) The service CIDR for the cluster; this must NOT overlap with any
# existing networks and is usually a /16 (64K IPs).
# If you want to use IPv6 check the advanced flags below and be aware of
# https://github.com/onedr0p/cluster-template/issues/1148
bootstrap_service_network: "10.96.0.0/16"
# (Required) The IP address of the Kube API; choose an available IP in
# your nodes' host network that is NOT being used. This is announced over L2.
# For k3s, kube-vip is used; Talos uses its built-in VIP functionality.
bootstrap_controllers_vip: ""
# (Optional) Add additional SANs to the Kube API cert; this is useful
# if you want to call the Kube API by hostname rather than IP.
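# For example (hypothetical hostname): bootstrap_tls_sans: ["k8s.example.com"]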
bootstrap_tls_sans: []
# (Required) Age Public Key (e.g. age1...)
# 1. Generate a new key with the following command:
# > task sops:age-keygen
# 2. Copy the PUBLIC key and paste it below
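# The generated output contains a line like "# public key: age1..." (assuming the task
# wraps the standard age-keygen tool); that line holds the value to paste below. The
# matching private key starts with "AGE-SECRET-KEY-1" and must NOT be committed.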
bootstrap_sops_age_pubkey: ""
# (Optional) Use the Cilium BGP control plane when L2 announcements won't traverse VLAN network segments.
# Needs a BGP-capable router set up with the node IPs as peers.
# See: https://docs.cilium.io/en/latest/network/bgp-control-plane/
bootstrap_bgp:
  enabled: false
  # (Optional) If using multiple BGP peers add them here.
  # Default is .1 derived from bootstrap_node_network: ['x.x.x.1']
  peers: []
  # (Required) Set the BGP Autonomous System Number for the router(s) and nodes.
  # If these match, iBGP will be used. If not, eBGP will be used.
  peer_asn: ""  # Router(s) AS
  local_asn: "" # Node(s) AS
  # (Required) The advertised CIDR for the cluster; this must NOT overlap with any
  # existing networks and is usually a /16 (64K IPs).
  # If you want to use IPv6 check the advanced flags below.
  advertised_network: ""
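  # A minimal sketch of a filled-in eBGP setup (all values below are hypothetical):
  #   bootstrap_bgp:
  #     enabled: true
  #     peers: ["192.168.1.1"]
  #     peer_asn: "64512"  # Router AS
  #     local_asn: "64513" # Node AS
  #     advertised_network: "10.45.0.0/16"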
#
# 2. (Required) Flux details - Flux is used to manage the cluster configuration.
#
# (Required) GitHub repository URL
# For a public repo use the 'https://' URL (e.g. "https://github.com/onedr0p/cluster-template.git")
# For a private repo use the 'ssh://' URL (e.g. "ssh://git@github.com/onedr0p/cluster-template.git")
# If using a private repo make sure to follow the instructions with the 'bootstrap_github_private_key' option below.
bootstrap_github_address: ""
# (Required) GitHub repository branch
bootstrap_github_branch: "main"
# (Required) Token for GitHub push-based sync
# 1. Generate a new token with the following command:
# > openssl rand -hex 16
# 2. Copy the token and paste it below
bootstrap_github_webhook_token: ""
# (Optional) Private key for Flux to access the GitHub repository
# 1. Generate a new key with the following command:
# > ssh-keygen -t ecdsa -b 521 -C "github-deploy-key" -f github-deploy.key -q -P ""
# 2. Make sure to paste the public key from "github-deploy.key.pub" into
# the deploy keys section of your GitHub repository settings.
# 3. Uncomment and paste the private key below
# 4. Optionally set your repository on GitHub to private
# bootstrap_github_private_key: |
# -----BEGIN OPENSSH PRIVATE KEY-----
# ...
# -----END OPENSSH PRIVATE KEY-----
#
# 3. (Optional) Cloudflare details - Cloudflare is used for DNS, TLS certificates and tunneling.
#
bootstrap_cloudflare:
  # (Required) Disable to manually set up and use a different DNS provider - setting this
  # to false will not deploy a network namespace or the workloads contained within.
  enabled: true
  # (Required) Cloudflare Domain
  domain: ""
  # (Required) Cloudflare API Token (NOT API Key)
  # 1. Head over to Cloudflare and create an API Token by going to
  #    https://dash.cloudflare.com/profile/api-tokens
  # 2. Under the `API Tokens` section click the blue `Create Token` button.
  # 3. Click the blue `Use template` button for the `Edit zone DNS` template.
  # 4. Name your token something like `home-kubernetes`
  # 5. Under `Permissions`, click `+ Add More` and add each permission below:
  #    `Zone - DNS - Edit`
  #    `Account - Cloudflare Tunnel - Read`
  # 6. Limit the permissions to a specific account and zone resources.
  # 7. Click the blue `Continue to Summary` button and then the blue `Create Token` button.
  # 8. Copy the token and paste it below.
  token: ""
  # (Required) Options for Cloudflare ACME
  acme:
    # (Required) Any email you want to be associated with the ACME account (used for TLS certs via letsencrypt.org)
    email: ""
    # (Required) Use the ACME production server when requesting the wildcard certificate.
    # By default the ACME staging server is used. This is to prevent being rate-limited.
    # Update this option to `true` when you have verified the staging certificate
    # works and then re-run `task configure` and push your changes to GitHub.
    production: false
  # (Required) Provide LAN access to the cluster ingresses for internal ingress classes
  # The load balancer IP for internal ingress; choose an available IP
  # in your nodes' host network that is NOT being used. This is announced over L2.
  ingress_vip: ""
  # (Required) Gateway is used for providing DNS to your cluster on LAN
  # The load balancer IP for k8s_gateway; choose an available IP
  # in your nodes' host network that is NOT being used. This is announced over L2.
  gateway_vip: ""
  # (Required) Options for Cloudflare Tunnel
  # There are two methods to create a tunnel, via the CLI or the Cloudflare dashboard;
  # the steps below use the CLI.
  # 1. Authenticate cloudflared to your domain with the following command:
  #    > cloudflared tunnel login
  # 2. Create the tunnel with the following command:
  #    > cloudflared tunnel create k8s
  tunnel:
    # (Required) Get the Cloudflared Tunnel ID with the following command:
    # > jq -r .TunnelID ~/.cloudflared/*.json
    id: ""
    # (Required) Get the Cloudflare Account ID with the following command:
    # > jq -r .AccountTag ~/.cloudflared/*.json
    account_id: ""
    # (Required) Get the Cloudflared Tunnel Secret with the following command:
    # > jq -r .TunnelSecret ~/.cloudflared/*.json
    secret: ""
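    # For reference, the credentials file written by `cloudflared tunnel create` is a small
    # JSON document shaped roughly like this (values are hypothetical placeholders):
    #   {"AccountTag": "abc123", "TunnelSecret": "bAsE64sEcReT==", "TunnelID": "f8a7c6e5-..."}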
    # (Required) Provide WAN access to the cluster ingresses for external ingress classes
    # The load balancer IP for external ingress; choose an available IP
    # in your nodes' host network that is NOT being used. This is announced over L2.
    ingress_vip: ""
# (Optional) Feature gates are used to enable experimental features
# bootstrap_feature_gates:
#   # Enable Dual Stack, IPv4 first
#   # IMPORTANT: I am looking for people to help maintain IPv6 support since I cannot test it.
#   # Ref: https://github.com/onedr0p/cluster-template/issues/1148
#   # Keep in mind that Cilium does not currently support IPv6 L2 announcements.
#   # Make sure you set bootstrap_pod_network and bootstrap_service_network
#   # to valid dual-stack CIDRs, e.g. "10.42.0.0/16,fd00:10:244::/64"
#   dual_stack_ipv4_first: false