forked from luizalabs/helm-rke2-o7k
-
Notifications
You must be signed in to change notification settings - Fork 0
/
values.yaml
160 lines (160 loc) · 4.48 KB
/
values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
# Helm values for provisioning an RKE2 cluster on OpenStack via Rancher.
# NOTE(review): indentation was lost in the copied source; the nesting below is
# reconstructed from the key semantics (Rancher RKE2 provisioning schema and the
# rke2-cilium chart). Verify against the chart templates before merging.
cloudprovider: openstack
imageRegistryURL: ""

# Cloud-init customisation injected into node user-data.
cloudinit:
  enable: false
  sshPubKeys: []
  # Disable rp_filter everywhere so Cilium's datapath is not broken by
  # strict reverse-path filtering (both at boot and at first run).
  bootcmd:
    - sed -i -e '/net.ipv4.conf.*.rp_filter/d' $(grep -ril '\.rp_filter' /etc/sysctl.d/ /usr/lib/sysctl.d/)
    - sysctl -a | grep '\.rp_filter' | awk '{print $1" = 0"}' > /etc/sysctl.d/1000-cilium.conf
    - sysctl --system
  runcmd:
    - sed -i -e '/net.ipv4.conf.*.rp_filter/d' $(grep -ril '\.rp_filter' /etc/sysctl.d/ /usr/lib/sysctl.d/)
    - sysctl -a | grep '\.rp_filter' | awk '{print $1" = 0"}' > /etc/sysctl.d/1000-cilium.conf
    - sysctl --system

# OpenStack credentials / placement. Empty keys render as null and are
# expected to be supplied at install time (--set or a values override).
openstack:
  authUrl: https://openstack.example.com:5000
  applicationCredentialId:
  applicationCredentialSecret:
  availabilityZone: nova
  subnetID:
  projectId:
  tenantDomainName: Default
  tenantName:
  username:
  domainName: default
  region: RegionOne
  floatingNetID:
  floatingSubnetID:

# NOTE(review): assumed top-level (image used by helper jobs talking to
# OpenStack); confirm it is not meant to live under `openstack:`.
openstackClientImage: openstacktools/openstack-client

cluster:
  apiAddr: kubernetes.default.svc.cluster.local
  apiPort: 6443
  additionalManifests: {}
  secretsEncryption: false
  upgradeStrategy:
    controlPlaneDrainOptions:
      enabled: false
      deleteEmptyDirData: false
      disableEviction: false
      gracePeriod: 0
      ignoreErrors: false
      skipWaitForDeleteTimeoutSeconds: 0
      timeout: 0
    workerDrainOptions:
      enabled: false
      deleteEmptyDirData: false
      disableEviction: false
      gracePeriod: 0
      ignoreErrors: false
      skipWaitForDeleteTimeoutSeconds: 0
      timeout: 0
    workerConcurrency: "10%"
    controlPlaneConcurrency: "10%"
  name: placeholder-cluster-name
  kubernetesVersion: v1.21.14+rke2r1
  cni:
    name: cilium
  autoscaler:
    enabled: true
    rancherUrl: https://rancher.placeholder.com
    rancherToken: rancher-token
    image: luizalabscicdmgc/cluster-autoscaler-amd64:dev
  monitoring:
    enabled: false
  rke:
    rkeIngressChart:
      enabled: true
      replicaCount: "1"
      autoScaling:
        enabled: true
        minReplicas: "1"
        maxReplicas: "3"
    # Values passed through to the rke2-cilium packaged chart.
    additionalChartValues:
      rke2-cilium:
        cilium:
          mtu: 1430  # can be changed
          hubble:
            metrics:
              enabled:
                - "dns:query;ignoreAAAA"
                - drop
                - tcp
                - flow
                - icmp
                - http
            relay:
              enabled: true
              image:
                repository: "cilium/hubble-relay"
                tag: "v1.12.1"
            ui:
              backend:
                image:
                  repository: "cilium/hubble-ui-backend"
                  tag: "v0.9.2"
              enabled: true
              frontend:
                image:
                  repository: "cilium/hubble-ui"
                  tag: "v0.9.2"
              replicas: 1
          image:
            repository: "rancher/mirrored-cilium-cilium"
            tag: "v1.12.1"
          nodeinit:
            image:
              repository: "rancher/mirrored-cilium-startup-script"
              tag: "d69851597ea019af980891a4628fb36b7880ec26"
          operator:
            image:
              repository: "rancher/mirrored-cilium-operator"
              tag: "v1.12.1"
          preflight:
            image:
              repository: "rancher/mirrored-cilium-cilium"
              tag: "v1.12.1"
          # kube-proxy-less datapath; Cilium needs the API server address
          # directly since kube-proxy is absent.
          kubeProxyReplacement: "strict"
          k8sServiceHost: kubernetes.default.svc.cluster.local
          k8sServicePort: 6443
    rkeIngressRawManifest:
      enabled: false
    etcd:
      args:
        - "quota-backend-bytes=858993459"
        - "max-request-bytes=33554432"
      exposeMetrics: true
      snapshotRetention: 5
      snapshotScheduleCron: "0 */12 * * *"
    coredns:
      nodelocal:
        enabled: true
  # NOTE(review): placed under `cluster:`; confirm these two stanzas are not
  # top-level keys in the chart templates.
  openstackControllerManager:
    image: k8scloudprovider/openstack-cloud-controller-manager
    tag: v1.24.0
    enableLoadBalancerCreateMonitor: false
  cinderCsiPlugin:
    image: k8scloudprovider/cinder-csi-plugin
    tag: v1.25.0

# Private registry auth and mirror configuration (Rancher `registries` schema).
registries:
  enabled: false
  configs:
    gcr.io:
      authConfigSecretName: secret-example
      caBundle: ''
      insecureSkipVerify: false
  mirrors:
    gcr.io:
      endpoint:
        - 'https://gcr.io'

nodeScripts: []
agentEnvVars: []

kubeapi:
  args: {}
kubelet:
  args: {}

localClusterAuthEndpoint:
  enabled: false
  fqdn: example.rancher.local
  secretName: example-rancher-local-secret
  tlsSan: []

nodepools: []