---

# The name of the Cluster API cluster
# If not given, the release name is used
clusterName:

# Settings for the CNI addon
cni:
  # Indicates if a CNI should be deployed
  enabled: true
  # The CNI to deploy - supported values are calico or cilium
  type: calico
  # Settings for the Calico CNI
  # See https://projectcalico.docs.tigera.io/getting-started/kubernetes/helm
  calico:
    chart:
      repo: https://projectcalico.docs.tigera.io/charts
      name: tigera-operator
      version: v3.29.1
    release:
      namespace: tigera-operator
      values:
        nodeSelector:
          node-role.kubernetes.io/control-plane: ""
    # Settings for a GlobalNetworkPolicy that blocks pod egress to the Nova
    # metadata service (except from the openstack-system namespace) while
    # allowing all other egress traffic
    # See https://docs.openstack.org/nova/latest/user/metadata.html#the-metadata-service
    globalNetworkPolicy:
      denyNamespaceSelector: kubernetes.io/metadata.name != 'openstack-system'
      allowPriority: 20
      denyPriority: 10
      allowEgressCidrs:
        - "0.0.0.0/0"
      denyEgressCidrs:
        - "169.254.169.254/32"
      allowv6EgressCidrs:
        - "::/0"
      denyv6EgressCidrs:
        - "fe80::a9fe:a9fe/128"
  # Settings for the Cilium CNI
  # See https://docs.cilium.io/en/stable/gettingstarted/k8s-install-helm/ for details
  cilium:
    chart:
      repo: https://helm.cilium.io/
      name: cilium
      version: 1.16.4
    release:
      namespace: kube-system
      values: {}
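      # As one illustrative example (not a recommendation), Cilium's
      # kube-proxy replacement could be switched on via chart values -
      # consult the Cilium docs linked above before enabling:
      # values:
      #   kubeProxyReplacement: true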

# Settings for CSI addons
csi:
  # Settings for the CephFS CSI
  cephfs:
    enabled: false
    chart:
      repo: https://ceph.github.io/csi-charts
      name: ceph-csi-cephfs
      version: 3.11.0
    release:
      namespace: csi-ceph-system
      values: {}
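      # The driver typically needs to be pointed at a Ceph cluster via chart
      # values, e.g. (illustrative placeholders - see the ceph-csi-cephfs
      # chart documentation for the full schema):
      # values:
      #   csiConfig:
      #     - clusterID: "<cluster-id>"
      #       monitors:
      #         - "<mon-address>:6789"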

# Settings for the OpenStack integrations
openstack:
  # Indicates if the OpenStack integrations should be enabled
  enabled: false
  # The target namespace for the OpenStack integrations
  targetNamespace: openstack-system
  # cloud-config options for the OpenStack integrations
  # The [Global] section is configured to use the target cloud
  # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#config-openstack-cloud-controller-manager
  # and https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md#block-storage
  cloudConfig:
    # By default, ignore volume AZs for Cinder as most clouds have a single globally-attachable Cinder AZ
    BlockStorage:
      ignore-volume-az: true
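    # Other cloud-config sections can be given in the same way, e.g.
    # (illustrative only - see the OCCM docs linked above for valid options):
    # LoadBalancer:
    #   create-monitor: true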
  # Settings for the Cloud Controller Manager (CCM)
  ccm:
    # Indicates if the OpenStack CCM should be enabled
    # By default, the CCM is enabled if the OpenStack integrations are enabled
    # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/charts/openstack-cloud-controller-manager/values.yaml
    enabled: true
    chart:
      repo: https://kubernetes.github.io/cloud-provider-openstack
      name: openstack-cloud-controller-manager
      version: 2.31.1
    values: {}
  # Settings for the Cinder CSI plugin
  csiCinder:
    # Indicates if the Cinder CSI should be enabled
    # By default, it is enabled if the OpenStack integrations are enabled
    # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/charts/cinder-csi-plugin/values.yaml
    enabled: true
    chart:
      repo: https://kubernetes.github.io/cloud-provider-openstack
      name: openstack-cinder-csi
      version: 2.31.2
    values:
      csi:
        plugin:
          controllerPlugin:
            nodeSelector:
              node-role.kubernetes.io/control-plane: ""
            tolerations:
              - key: node-role.kubernetes.io/control-plane
                effect: NoSchedule
    # Definition of the default storage class for the Cinder CSI
    defaultStorageClass:
      # Indicates if the storage class should be enabled
      enabled: true
      # The name of the storage class
      name: csi-cinder
      # Indicates if the Cinder default storage class is the cluster default storage class
      isClusterDefault: true
      # The reclaim policy for the storage class
      reclaimPolicy: Delete
      # Indicates if volume expansion is allowed
      allowVolumeExpansion: true
      # Controls when volume binding and dynamic provisioning should occur
      volumeBindingMode: WaitForFirstConsumer
      # The allowed topologies for the storage class
      allowedTopologies:
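      # For example, to restrict provisioning to a single AZ (illustrative -
      # the topology key is the one published by the Cinder CSI node plugin):
      # allowedTopologies:
      #   - matchLabelExpressions:
      #       - key: topology.cinder.csi.openstack.org/zone
      #         values:
      #           - nova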
      # Filesystem type to use for volumes provisioned with the storage class
      # If not given, the default filesystem type will be used
      fstype:
      # The Cinder availability zone to use for volumes provisioned by the storage class
      availabilityZone: nova
      # The Cinder volume type to use for volumes provisioned by the storage class
      # If not given, the default volume type will be used
      volumeType:
    # Additional storage classes to create for the Cinder CSI
    # For each item, the properties from the default storage class are supported (except enabled and isClusterDefault)
    additionalStorageClasses: []
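    # For example (illustrative - the "ssd" volume type is hypothetical and
    # must exist in Cinder on the target cloud):
    # additionalStorageClasses:
    #   - name: csi-cinder-ssd
    #     reclaimPolicy: Delete
    #     allowVolumeExpansion: true
    #     volumeBindingMode: WaitForFirstConsumer
    #     availabilityZone: nova
    #     volumeType: ssd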
  # Settings for the Manila CSI plugin
  csiManila:
    # Indicates if the Manila CSI should be enabled
    # By default, it is disabled as Manila is not commonly available
    # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/charts/manila-csi-plugin/values.yaml
    enabled: false
    chart:
      repo: https://kubernetes.github.io/cloud-provider-openstack
      name: openstack-manila-csi
      version: 2.30.0
    values: {}
    # Definition of the default storage class for the Manila CSI
    defaultStorageClass:
      # Indicates if the storage class should be enabled
      enabled: true
      # The name of the storage class
      name: csi-manila
      # Indicates if the Manila default storage class is the cluster default storage class
      isClusterDefault: false
      # The provisioner for the storage class
      # If not given and the Ceph CSI plugin is installed, cephfs.manila.csi.openstack.org is used
      provisioner:
      # The reclaim policy for the storage class
      reclaimPolicy: Delete
      # Indicates if volume expansion is allowed
      allowVolumeExpansion: true
      # Controls when volume binding and dynamic provisioning should occur
      volumeBindingMode: WaitForFirstConsumer
      # The allowed topologies for the storage class
      allowedTopologies:
      # The parameters for the storage class
      # See https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/manila-csi-plugin/using-manila-csi-plugin.md#controller-service-volume-parameters
      parameters:
        # The Manila share type to use
        # If not given and the Ceph CSI plugin is installed, cephfs is used
        type:
    # Additional storage classes to create for the Manila CSI
    # For each item, the properties from the default storage class are supported (except for "enabled")
    additionalStorageClasses: []
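    # For example (illustrative - the "hdd" share type is hypothetical and
    # must exist in Manila on the target cloud):
    # additionalStorageClasses:
    #   - name: csi-manila-hdd
    #     parameters:
    #       type: hdd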
  # Settings for Keystone authentication for the Kubernetes API (k8s-keystone-auth)
  k8sKeystoneAuth:
    # Indicates if k8s-keystone-auth should be enabled
    enabled: false
    # The target namespace for the k8s-keystone-auth deployment
    targetNamespace: kube-system
    chart:
      repo: https://catalyst-cloud.github.io/capi-plugin-helm-charts
      name: k8s-keystone-auth
      version: 1.3.0

# Settings for etcd defragmentation jobs
etcdDefrag:
  # Indicates if the etcd defragmentation job should be enabled
  enabled: true
  chart:
    repo: https://azimuth-cloud.github.io/capi-helm-charts
    name: etcd-defrag
    version:  # Defaults to the same version as this chart
  release:
    # This should be the namespace in which the etcd pods are deployed
    namespace: kube-system
    values: {}

# Settings for the metrics server
# https://github.com/kubernetes-sigs/metrics-server#helm-chart
metricsServer:
  # Indicates if the metrics server should be deployed
  enabled: true
  chart:
    repo: https://kubernetes-sigs.github.io/metrics-server
    name: metrics-server
    version: 3.12.2
  release:
    namespace: kube-system
    values: {}
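    # For example, on clouds where kubelets serve self-signed certificates,
    # the usual workaround can be applied via chart values (illustrative
    # only - understand the security implications before using it):
    # values:
    #   args:
    #     - --kubelet-insecure-tls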

# Settings for the Kubernetes dashboard
# https://github.com/kubernetes/dashboard/tree/master/charts/helm-chart/kubernetes-dashboard
kubernetesDashboard:
  # Indicates if the Kubernetes dashboard should be enabled
  enabled: false
  chart:
    repo: https://kubernetes.github.io/dashboard
    name: kubernetes-dashboard
    version: 6.0.8
  release:
    namespace: kubernetes-dashboard
    values: {}

# Settings for ingress controllers
ingress:
  # Indicates if ingress controllers should be enabled
  enabled: false
  # Settings for the Nginx ingress controller
  # https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx#configuration
  nginx:
    # Indicates if the Nginx ingress controller should be enabled
    # The Nginx ingress controller is enabled by default if ingress controllers are enabled
    enabled: true
    chart:
      repo: https://kubernetes.github.io/ingress-nginx
      name: ingress-nginx
      version: 4.11.3
    release:
      namespace: ingress-nginx
      values: {}

# Settings for cluster monitoring
monitoring:
  # Indicates if the cluster monitoring should be enabled
  enabled: false
  # Config for the kube-prometheus-stack helm chart
  # https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack
  kubePrometheusStack:
    chart:
      repo: https://prometheus-community.github.io/helm-charts
      name: kube-prometheus-stack
      version: 66.1.1
    release:
      namespace: monitoring-system
      values:
        # Enable persistence by default for Prometheus and Alertmanager
        alertmanager:
          alertmanagerSpec:
            # By default, retain 7 days of data
            retention: 168h
            storage:
              volumeClaimTemplate:
                spec:
                  accessModes: ["ReadWriteOnce"]
                  resources:
                    requests:
                      storage: 10Gi
        prometheus:
          prometheusSpec:
            # The amount of data that is retained will be 90 days or 95% of the size of the
            # persistent volume, whichever is reached first
            retention: 90d
            storageSpec:
              volumeClaimTemplate:
                spec:
                  accessModes: ["ReadWriteOnce"]
                  resources:
                    requests:
                      storage: 10Gi
  # Config for the loki-stack helm chart
  lokiStack:
    # Indicates if the Loki stack should be enabled
    enabled: true
    chart:
      repo: https://grafana.github.io/helm-charts
      name: loki-stack
      version: 2.10.2
    release:
      namespace: monitoring-system
      values:
        loki:
          # Enable retention and configure a default retention period of 31 days
          config:
            compactor:
              retention_enabled: true
            limits_config:
              retention_period: 744h
          # Enable persistence by default
          persistence:
            enabled: true
            size: 10Gi
  # Configuration for the blackbox exporter
  blackboxExporter:
    enabled: true
    chart:
      repo: https://prometheus-community.github.io/helm-charts
      name: prometheus-blackbox-exporter
      version: 9.1.0
    release:
      namespace: monitoring-system
      values: {}
      # Example of adding additional scrape targets
      # serviceMonitor:
      #   targets:
      #     - name: example
      #       url: http://example.com/healthz

# Settings for node feature discovery
# https://github.com/kubernetes-sigs/node-feature-discovery/tree/master/deployment/helm/node-feature-discovery
nodeFeatureDiscovery:
  # Indicates if node feature discovery should be enabled
  enabled: true
  chart:
    repo: https://kubernetes-sigs.github.io/node-feature-discovery/charts
    name: node-feature-discovery
    version: 0.16.6
  release:
    namespace: node-feature-discovery
    values: {}

# Settings for the NVIDIA GPU operator
nvidiaGPUOperator:
  # Indicates if the NVIDIA GPU operator should be enabled
  # Note that because it uses node feature discovery to run only on nodes
  # with an NVIDIA GPU available, the overhead of enabling this on clusters
  # that do not need it now but may need it in the future is low
  enabled: true
  chart:
    repo: https://helm.ngc.nvidia.com/nvidia
    name: gpu-operator
    version: v24.9.0
  release:
    namespace: gpu-operator
    values:
      dcgmExporter:
        serviceMonitor:
          enabled: true

# Settings for the Mellanox network operator
mellanoxNetworkOperator:
  # Indicates if the network operator should be enabled
  # Note that because it uses node feature discovery to run only on nodes
  # with a Mellanox NIC available, the overhead of enabling this on clusters
  # that do not need it now but may need it in the future is low
  enabled: true
  chart:
    repo: https://helm.ngc.nvidia.com/nvidia
    name: network-operator
    version: 23.7.0
  release:
    namespace: network-operator
    values: {}

# Settings for the node problem detector
nodeProblemDetector:
  # Indicates if the node problem detector should be enabled
  enabled: true
  chart:
    repo: https://charts.deliveryhero.io
    name: node-problem-detector
    version: 2.3.14
  release:
    namespace: node-problem-detector
    values: {}

# Settings for any custom addons
custom: {}
  # # Indexed by the name of the release on the target cluster
  # my-custom-helm-release:
  #   # Indicates that this is a Helm addon
  #   kind: HelmRelease
  #   spec:
  #     # The namespace that the addon should be in
  #     namespace: my-namespace
  #     # Details of the Helm chart to use
  #     chart:
  #       # The chart repository that contains the chart to use
  #       repo: https://my-project/charts
  #       # The name of the chart to use
  #       name: my-chart
  #       # The version of the chart to use (must be an exact version)
  #       version: 1.5.0
  #     # The Helm values to use for the release
  #     values: {}
  # # Indexed by the name of the release on the target cluster
  # my-custom-manifests:
  #   # Indicates that this is a Manifests addon
  #   kind: Manifests
  #   spec:
  #     # The namespace that the addon should be in
  #     namespace: my-namespace
  #     # The manifests for the addon, indexed by filename
  #     manifests:
  #       secret.yaml: |-
  #         apiVersion: v1
  #         kind: Secret
  #         metadata:
  #           name: my-secret
  #         stringData:
  #           secret-file: "secret-data"