Skip to content

Commit

Permalink
vcluster config convert statefulSet.affinity bugfix (#2329) (#2345)
Browse files Browse the repository at this point in the history
* add test case reproducing convert bug

Signed-off-by: Paweł Bojanowski <[email protected]>

* fix vcluster config convert overwriting statefulSet affinity; do not prune ephemeral-storage null values

Signed-off-by: Paweł Bojanowski <[email protected]>

* do not prune default null values

Signed-off-by: Paweł Bojanowski <[email protected]>

* prune keys for empty maps etc. but keep them for null values

Signed-off-by: Paweł Bojanowski <[email protected]>

---------

Signed-off-by: Paweł Bojanowski <[email protected]>
(cherry picked from commit b4baae9)

Co-authored-by: Paweł Bojanowski <[email protected]>
  • Loading branch information
loft-bot and hidalgopl authored Dec 19, 2024
1 parent 6fd11c5 commit 4542ec6
Show file tree
Hide file tree
Showing 3 changed files with 191 additions and 22 deletions.
6 changes: 4 additions & 2 deletions config/diff.go
Original file line number Diff line number Diff line change
Expand Up @@ -112,14 +112,16 @@ func prune(in interface{}) interface{} {

for k, v := range inType {
inType[k] = prune(v)
if inType[k] == nil {
// delete key only if original value was not nil (but for example empty map),
// otherwise we want to keep null as a value
if inType[k] == nil && v != nil {
delete(inType, k)
}
}

if len(inType) == 0 {
return nil
}

return inType
default:
return in
Expand Down
3 changes: 2 additions & 1 deletion config/legacyconfig/migrate.go
Original file line number Diff line number Diff line change
Expand Up @@ -274,13 +274,14 @@ func convertBaseValues(oldConfig BaseHelm, newConfig *config.Config) error {
}

newConfig.Networking.Advanced.FallbackHostCluster = oldConfig.FallbackHostDNS

newConfig.ControlPlane.StatefulSet.Labels = oldConfig.Labels
newConfig.ControlPlane.StatefulSet.Annotations = oldConfig.Annotations
newConfig.ControlPlane.StatefulSet.Pods.Labels = oldConfig.PodLabels
newConfig.ControlPlane.StatefulSet.Pods.Annotations = oldConfig.PodAnnotations
newConfig.ControlPlane.StatefulSet.Scheduling.Tolerations = oldConfig.Tolerations
newConfig.ControlPlane.StatefulSet.Scheduling.NodeSelector = oldConfig.NodeSelector
newConfig.ControlPlane.StatefulSet.Scheduling.Affinity = oldConfig.Affinity
newConfig.ControlPlane.StatefulSet.Scheduling.Affinity = mergeMaps(newConfig.ControlPlane.StatefulSet.Scheduling.Affinity, oldConfig.Affinity)
newConfig.ControlPlane.StatefulSet.Scheduling.PriorityClassName = oldConfig.PriorityClassName

newConfig.Networking.ReplicateServices.FromHost = oldConfig.MapServices.FromHost
Expand Down
204 changes: 185 additions & 19 deletions config/legacyconfig/migrate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -297,8 +297,7 @@ sync:
enabled: true
statefulSet:
scheduling:
podManagementPolicy: OrderedReady
`,
podManagementPolicy: OrderedReady`,
},
{
Name: "embedded etcd",
Expand Down Expand Up @@ -463,9 +462,7 @@ controlPlane:
tag: v1.30.2-k3s2
statefulSet:
scheduling:
podManagementPolicy: OrderedReady
`,
podManagementPolicy: OrderedReady`,
ExpectedErr: "migrate legacy k3s values: config is already in correct format",
},
{
Expand All @@ -484,26 +481,195 @@ controlPlane:
enabled: true
statefulSet:
scheduling:
podManagementPolicy: OrderedReady
`,
podManagementPolicy: OrderedReady`,
ExpectedErr: "migrate legacy k8s values: config is already in correct format",
},
{
Name: "statefulset affinity added",
Distro: "k8s",
In: `isolation:
# nodeProxyPermission:
# enabled: true
enabled: true
podSecurityStandard: baseline
resourceQuota:
enabled: true
quota:
count/endpoints: null
count/pods: null
count/services: null
count/configmaps: null
count/secrets: null
count/persistentvolumeclaims: null
limits.cpu: 256
limits.memory: 1Ti
requests.storage: 10Ti
requests.ephemeral-storage: null
requests.memory: 128Gi
requests.cpu: 120
services.loadbalancers: null
services.nodeports: null
limitRange:
enabled: true
defaultRequest:
cpu: 24m
memory: 32Mi
ephemeral-storage: null
default:
ephemeral-storage: null
memory: 2Gi
cpu: 512m
# max:
# cpu: 32
# memory: 64Gi
# ephemeral-storage: 512Gi
networkPolicy:
enabled: false
storage:
className: px-pool
sync:
secrets:
enabled: true
nodes:
enabled: true
networkpolicies:
enabled: true
hoststorageclasses:
enabled: true
# enableHA: true
embeddedEtcd:
enabled: true
syncer:
resources:
limits:
cpu: '8'
ephemeral-storage: 8Gi
memory: 10Gi
# extraArgs:
# - '--sync-labels=namespace,aussiebb.io/,..aussiebb.io/'
replicas: 3
labels:
aussiebb.io/profile: "true"
storage:
size: 50Gi
className: px-pool-etcd
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- vcluster
topologyKey: "kubernetes.io/hostname"
coredns:
replicas: 3
resources:
limits:
cpu: '2'
memory: '1Gi'
api:
extraArgs:
- "-v=4"`,
Expected: `controlPlane:
backingStore:
etcd:
embedded:
enabled: true
coredns:
deployment:
replicas: 3
resources:
limits:
cpu: "2"
memory: 1Gi
distro:
k8s:
apiServer:
extraArgs:
- -v=4
enabled: true
statefulSet:
highAvailability:
replicas: 3
persistence:
volumeClaim:
size: 50Gi
storageClass: px-pool-etcd
resources:
limits:
cpu: "8"
memory: 10Gi
scheduling:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- vcluster
topologyKey: kubernetes.io/hostname
podManagementPolicy: OrderedReady
policies:
limitRange:
default:
cpu: 512m
ephemeral-storage: null
memory: 2Gi
defaultRequest:
cpu: 24m
ephemeral-storage: null
memory: 32Mi
enabled: true
podSecurityStandard: baseline
resourceQuota:
enabled: true
quota:
count/configmaps: null
count/endpoints: null
count/persistentvolumeclaims: null
count/pods: null
count/secrets: null
count/services: null
limits.cpu: 256
limits.memory: 1Ti
requests.cpu: 120
requests.ephemeral-storage: null
requests.memory: 128Gi
requests.storage: 10Ti
services.loadbalancers: null
services.nodeports: null
sync:
fromHost:
nodes:
enabled: true
storageClasses:
enabled: true
toHost:
networkPolicies:
enabled: true`,
ExpectedErr: "",
},
}

for _, testCase := range testCases {
out, err := MigrateLegacyConfig(testCase.Distro, testCase.In)
if err != nil {
if testCase.ExpectedErr != "" && testCase.ExpectedErr == err.Error() {
continue
}
t.Run(testCase.Name, func(t *testing.T) {
out, err := MigrateLegacyConfig(testCase.Distro, testCase.In)
if err != nil {
if testCase.ExpectedErr != "" && testCase.ExpectedErr == err.Error() {
return
}

t.Fatalf("Test case %s failed with: %v", testCase.Name, err)
}
t.Fatalf("Test case %s failed with: %v", testCase.Name, err)
}

if strings.TrimSpace(testCase.Expected) != strings.TrimSpace(out) {
t.Log(out)
}
assert.Equal(t, strings.TrimSpace(testCase.Expected), strings.TrimSpace(out), testCase.Name)
if strings.TrimSpace(testCase.Expected) != strings.TrimSpace(out) {
t.Log(out)
}
assert.Equal(t, strings.TrimSpace(testCase.Expected), strings.TrimSpace(out), testCase.Name)
})
}
}

0 comments on commit 4542ec6

Please sign in to comment.