From 5f3ba2f72b9833b2222f1f33b4af409d67c5e59e Mon Sep 17 00:00:00 2001 From: Nico <588438+rybnico@users.noreply.github.com> Date: Thu, 5 Dec 2024 10:58:23 +0100 Subject: [PATCH] feat: Add ability to specify range of VM IDs to use (#286) * Return VMIDFreeErr or the error object itself if CheckID returns true/an error * Rename VMIDFreeErr to ErrVMIDFree and fix comment to make linter happy * feat: Add ability to specify range of VM IDs to use * Fix codespell error: fix spelling * When checking if a vmid is free, first check the existing ProxmoxMachines before querying the Proxmox API. * Check that the vmid of the Proxmox machine is set (not -1) before adding it to usedVMIDs * Move spec.vmidRange from ProxmoxCluster to ProxmoxMachine * Update github.com/luthermonson/go-proxmox to v0.2.0 * Revert "Update github.com/luthermonson/go-proxmox to v0.2.0" This reverts commit c5d15e56ef7c61ab884c3726e2e9707ba8038a27. Because of this bug https://github.com/luthermonson/go-proxmox/pull/169 * Update github.com/luthermonson/go-proxmox to v0.2.1 * Add test for ClusterScope.ListProxmoxMachinesForCluster * Fix wording in ProxmoxMachine types test * Rename vmidRange to vmIDRange to follow k8s API conventions * Add validation for vmIDRange: end should be greater than or equal to start * Set failureMessage and failureReason when ErrNoVMIDInRangeFree is thrown * Refactor getVMID to improve code quality --------- Co-authored-by: Mohamed Chiheb Ben Jemaa --- api/v1alpha1/proxmoxmachine_types.go | 29 +++- api/v1alpha1/proxmoxmachine_types_test.go | 151 ++++++++++++------ api/v1alpha1/zz_generated.deepcopy.go | 20 +++ ...ture.cluster.x-k8s.io_proxmoxclusters.yaml | 26 +++ ...ster.x-k8s.io_proxmoxclustertemplates.yaml | 27 ++++ ...ture.cluster.x-k8s.io_proxmoxmachines.yaml | 25 +++ ...ster.x-k8s.io_proxmoxmachinetemplates.yaml | 25 +++ .../controller/proxmoxcluster_controller.go | 17 +- internal/service/vmservice/vm.go | 66 +++++++- internal/service/vmservice/vm_test.go | 101 ++++++++++++ pkg/proxmox/client.go | 2 + pkg/proxmox/goproxmox/api_client.go | 10 ++ pkg/proxmox/proxmoxtest/mock_client.go | 53 ++++++ pkg/scope/cluster.go | 14 ++ pkg/scope/cluster_test.go | 105 ++++++++++++ 15 files changed, 599 insertions(+), 72 deletions(-) diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index d2fd7119..4a3dce1d 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -23,7 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/errors" + clusterapierrors "sigs.k8s.io/cluster-api/errors" ) const ( @@ -101,6 +101,11 @@ type ProxmoxMachineSpec struct { // +optional Network *NetworkSpec `json:"network,omitempty"` + // VMIDRange is the range of VMIDs to use for VMs. + // +optional + // +kubebuilder:validation:XValidation:rule="self.end >= self.start",message="end should be greater than or equal to start" + VMIDRange *VMIDRange `json:"vmIDRange,omitempty"` + Checks *ProxmoxMachineChecks `json:"checks,omitempty"` } @@ -434,7 +439,7 @@ type ProxmoxMachineStatus struct { // can be added as events to the ProxmoxMachine object and/or logged in the // controller's output. 
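// For illustration (not part of this patch): with the vmIDRange field added to
// ProxmoxMachineSpec above, a user pins VMID allocation to a window in the
// ProxmoxMachine manifest, e.g.
//
//	spec:
//	  vmIDRange:
//	    start: 1000
//	    end: 1999
//
// Both bounds are required once vmIDRange is set, must lie in
// [100, 999999999], and the CEL rule rejects end < start at admission time.
// Leaving vmIDRange unset preserves the previous behaviour of letting
// go-proxmox pick the next free ID.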
// +optional - FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"` + FailureReason *clusterapierrors.MachineStatusError `json:"failureReason,omitempty"` // FailureMessage will be set in the event that there is a terminal problem // reconciling the Machine and will contain a more verbose string suitable @@ -471,6 +476,26 @@ type IPAddress struct { IPV6 string `json:"ipv6,omitempty"` } +// VMIDRange defines the range of VMIDs to use for VMs. +type VMIDRange struct { + // VMIDRangeStart is the start of the VMID range to use for VMs. + // +kubebuilder:validation:Minimum=100 + // +kubebuilder:validation:ExclusiveMinimum=false + // +kubebuilder:validation:Maximum=999999999 + // +kubebuilder:validation:ExclusiveMaximum=false + // +kubebuilder:validation:Required + Start int64 `json:"start"` + + // VMIDRangeEnd is the end of the VMID range to use for VMs. + // Only used if VMIDRangeStart is set. + // +kubebuilder:validation:Minimum=100 + // +kubebuilder:validation:ExclusiveMinimum=false + // +kubebuilder:validation:Maximum=999999999 + // +kubebuilder:validation:ExclusiveMaximum=false + // +kubebuilder:validation:Required + End int64 `json:"end"` +} + // +kubebuilder:object:root=true // +kubebuilder:storageversion // +kubebuilder:subresource:status diff --git a/api/v1alpha1/proxmoxmachine_types_test.go b/api/v1alpha1/proxmoxmachine_types_test.go index 13b8114b..0d0b0195 100644 --- a/api/v1alpha1/proxmoxmachine_types_test.go +++ b/api/v1alpha1/proxmoxmachine_types_test.go @@ -108,16 +108,18 @@ var _ = Describe("ProxmoxMachine Test", func() { Default: &NetworkDevice{ Bridge: "vmbr0", }, - AdditionalDevices: []AdditionalNetworkDevice{{ - NetworkDevice: NetworkDevice{}, - Name: "net0", - InterfaceConfig: InterfaceConfig{ - IPv4PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "InClusterIPPool", - Name: "some-pool", - }}, - }, + AdditionalDevices: []AdditionalNetworkDevice{ + { + NetworkDevice: NetworkDevice{}, + Name: "net0", + InterfaceConfig: InterfaceConfig{ + IPv4PoolRef: &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "InClusterIPPool", + Name: "some-pool", + }, + }, + }, }, } @@ -127,15 +129,17 @@ var _ = Describe("ProxmoxMachine Test", func() { It("Should only allow IPAM pool resources in IPv4PoolRef apiGroup", func() { dm := defaultMachine() dm.Spec.Network = &NetworkSpec{ - AdditionalDevices: []AdditionalNetworkDevice{{ - NetworkDevice: NetworkDevice{}, - Name: "net1", - InterfaceConfig: InterfaceConfig{ - IPv4PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("apps"), - Name: "some-app", - }}, - }, + AdditionalDevices: []AdditionalNetworkDevice{ + { + NetworkDevice: NetworkDevice{}, + Name: "net1", + InterfaceConfig: InterfaceConfig{ + IPv4PoolRef: &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("apps"), + Name: "some-app", + }, + }, + }, }, } Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("ipv4PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io"))) @@ -144,15 +148,16 @@ var _ = Describe("ProxmoxMachine Test", func() { It("Should only allow IPAM pool resources in IPv4PoolRef kind", func() { dm := defaultMachine() dm.Spec.Network = &NetworkSpec{ - AdditionalDevices: []AdditionalNetworkDevice{{ - NetworkDevice: NetworkDevice{}, - Name: "net1", - InterfaceConfig: InterfaceConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "ConfigMap", - Name: "some-app", - }}, - }, + 
AdditionalDevices: []AdditionalNetworkDevice{ + { + NetworkDevice: NetworkDevice{}, + Name: "net1", + InterfaceConfig: InterfaceConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "ConfigMap", + Name: "some-app", + }}, + }, }, } Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool"))) @@ -161,15 +166,17 @@ var _ = Describe("ProxmoxMachine Test", func() { It("Should only allow IPAM pool resources in IPv6PoolRef apiGroup", func() { dm := defaultMachine() dm.Spec.Network = &NetworkSpec{ - AdditionalDevices: []AdditionalNetworkDevice{{ - NetworkDevice: NetworkDevice{}, - Name: "net1", - InterfaceConfig: InterfaceConfig{ - IPv6PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("apps"), - Name: "some-app", - }}, - }, + AdditionalDevices: []AdditionalNetworkDevice{ + { + NetworkDevice: NetworkDevice{}, + Name: "net1", + InterfaceConfig: InterfaceConfig{ + IPv6PoolRef: &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("apps"), + Name: "some-app", + }, + }, + }, }, } Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("ipv6PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io"))) @@ -178,16 +185,18 @@ var _ = Describe("ProxmoxMachine Test", func() { It("Should only allow IPAM pool resources in IPv6PoolRef kind", func() { dm := defaultMachine() dm.Spec.Network = &NetworkSpec{ - AdditionalDevices: []AdditionalNetworkDevice{{ - NetworkDevice: NetworkDevice{}, - Name: "net1", - InterfaceConfig: InterfaceConfig{ - IPv6PoolRef: &corev1.TypedLocalObjectReference{ - APIGroup: ptr.To("ipam.cluster.x-k8s.io"), - Kind: "ConfigMap", - Name: "some-app", - }}, - }, + AdditionalDevices: []AdditionalNetworkDevice{ + { + NetworkDevice: NetworkDevice{}, + Name: "net1", + InterfaceConfig: InterfaceConfig{ + IPv6PoolRef: &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "ConfigMap", + Name: "some-app", + }, + }, + }, }, } Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool"))) @@ -196,10 +205,11 @@ var _ = Describe("ProxmoxMachine Test", func() { It("Should only allow Machine with additional devices with at least a pool ref", func() { dm := defaultMachine() dm.Spec.Network = &NetworkSpec{ - AdditionalDevices: []AdditionalNetworkDevice{{ - NetworkDevice: NetworkDevice{}, - Name: "net1", - }, + AdditionalDevices: []AdditionalNetworkDevice{ + { + NetworkDevice: NetworkDevice{}, + Name: "net1", + }, }, } Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("at least one pool reference must be set, either ipv4PoolRef or ipv6PoolRef"))) @@ -286,4 +296,43 @@ var _ = Describe("ProxmoxMachine Test", func() { Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should be less than or equal to 4094"))) }) }) + + Context("VMIDRange", func() { + It("Should only allow spec.vmIDRange.start >= 100", func() { + dm := defaultMachine() + dm.Spec.VMIDRange = &VMIDRange{ + Start: 1, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should be greater than or equal to 100"))) + }) + It("Should only allow spec.vmIDRange.end >= 100", func() { + dm := defaultMachine() + dm.Spec.VMIDRange = &VMIDRange{ + End: 1, + } + Expect(k8sClient.Create(context.Background(), 
dm)).Should(MatchError(ContainSubstring("should be greater than or equal to 100"))) + }) + It("Should only allow spec.vmIDRange.end >= spec.vmIDRange.start", func() { + dm := defaultMachine() + dm.Spec.VMIDRange = &VMIDRange{ + Start: 101, + End: 100, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should be greater than or equal to start"))) + }) + It("Should only allow spec.vmIDRange.start if spec.vmIDRange.end is set", func() { + dm := defaultMachine() + dm.Spec.VMIDRange = &VMIDRange{ + Start: 100, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("spec.vmIDRange.end in body should be greater than or equal to 100"))) + }) + It("Should only allow spec.vmIDRange.end if spec.vmIDRange.start is set", func() { + dm := defaultMachine() + dm.Spec.VMIDRange = &VMIDRange{ + End: 100, + } + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("spec.vmIDRange.start in body should be greater than or equal to 100"))) + }) + }) }) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index fa1493f9..8fab5666 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -620,6 +620,11 @@ func (in *ProxmoxMachineSpec) DeepCopyInto(out *ProxmoxMachineSpec) { *out = new(NetworkSpec) (*in).DeepCopyInto(*out) } + if in.VMIDRange != nil { + in, out := &in.VMIDRange, &out.VMIDRange + *out = new(VMIDRange) + **out = **in + } if in.Checks != nil { in, out := &in.Checks, &out.Checks *out = new(ProxmoxMachineChecks) @@ -897,6 +902,21 @@ func (in *Storage) DeepCopy() *Storage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMIDRange) DeepCopyInto(out *VMIDRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMIDRange. +func (in *VMIDRange) DeepCopy() *VMIDRange { + if in == nil { + return nil + } + out := new(VMIDRange) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VRFDevice) DeepCopyInto(out *VRFDevice) { *out = *in diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index 6dc1e677..a098bdbb 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -553,6 +553,32 @@ spec: for the ProxmoxMachine VM. format: int64 type: integer + vmIDRange: + description: VMIDRange is the range of VMIDs to use for + VMs. + properties: + end: + description: |- + VMIDRangeEnd is the end of the VMID range to use for VMs. + Only used if VMIDRangeStart is set. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + start: + description: VMIDRangeStart is the start of the VMID + range to use for VMs. 
+ format: int64 + maximum: 999999999 + minimum: 100 + type: integer + required: + - end + - start + type: object + x-kubernetes-validations: + - message: end should be greater than or equal to start + rule: self.end >= self.start required: - sourceNode type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 1dc4410e..f2c06a27 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -593,6 +593,33 @@ spec: for the ProxmoxMachine VM. format: int64 type: integer + vmIDRange: + description: VMIDRange is the range of VMIDs to + use for VMs. + properties: + end: + description: |- + VMIDRangeEnd is the end of the VMID range to use for VMs. + Only used if VMIDRangeStart is set. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + start: + description: VMIDRangeStart is the start of + the VMID range to use for VMs. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + required: + - end + - start + type: object + x-kubernetes-validations: + - message: end should be greater than or equal to + start + rule: self.end >= self.start required: - sourceNode type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index a8c06f61..d9e0973d 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -518,6 +518,31 @@ spec: VM. format: int64 type: integer + vmIDRange: + description: VMIDRange is the range of VMIDs to use for VMs. + properties: + end: + description: |- + VMIDRangeEnd is the end of the VMID range to use for VMs. + Only used if VMIDRangeStart is set. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + start: + description: VMIDRangeStart is the start of the VMID range to + use for VMs. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + required: + - end + - start + type: object + x-kubernetes-validations: + - message: end should be greater than or equal to start + rule: self.end >= self.start required: - sourceNode type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index f8967773..db700ecc 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -551,6 +551,31 @@ spec: the ProxmoxMachine VM. format: int64 type: integer + vmIDRange: + description: VMIDRange is the range of VMIDs to use for VMs. + properties: + end: + description: |- + VMIDRangeEnd is the end of the VMID range to use for VMs. + Only used if VMIDRangeStart is set. + format: int64 + maximum: 999999999 + minimum: 100 + type: integer + start: + description: VMIDRangeStart is the start of the VMID range + to use for VMs. 
+ format: int64 + maximum: 999999999 + minimum: 100 + type: integer + required: + - end + - start + type: object + x-kubernetes-validations: + - message: end should be greater than or equal to start + rule: self.end >= self.start required: - sourceNode type: object diff --git a/internal/controller/proxmoxcluster_controller.go b/internal/controller/proxmoxcluster_controller.go index cc0c8fb5..9f965147 100644 --- a/internal/controller/proxmoxcluster_controller.go +++ b/internal/controller/proxmoxcluster_controller.go @@ -133,7 +133,6 @@ func (r *ProxmoxClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque ProxmoxClient: r.ProxmoxClient, IPAMHelper: ipam.NewHelper(r.Client, proxmoxCluster.DeepCopy()), }) - if err != nil { return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err) } @@ -166,7 +165,7 @@ func (r *ProxmoxClusterReconciler) reconcileDelete(ctx context.Context, clusterS // Deletion usually should be triggered through the deletion of the owning cluster. // If the ProxmoxCluster was also flagged for deletion (e.g. deletion using the manifest file) // we should only allow to remove the finalizer when there are no ProxmoxMachines left. - machines, err := r.listProxmoxMachinesForCluster(ctx, clusterScope) + machines, err := clusterScope.ListProxmoxMachinesForCluster(ctx) if err != nil { return reconcile.Result{}, errors.Wrapf(err, "could not retrieve proxmox machines for cluster %q", clusterScope.InfraClusterName()) } @@ -312,20 +311,6 @@ func (r *ProxmoxClusterReconciler) reconcileIPAM(ctx context.Context, clusterSco return reconcile.Result{}, nil } -func (r *ProxmoxClusterReconciler) listProxmoxMachinesForCluster(ctx context.Context, clusterScope *scope.ClusterScope) ([]infrav1alpha1.ProxmoxMachine, error) { - var machineList infrav1alpha1.ProxmoxMachineList - - err := r.List(ctx, &machineList, client.InNamespace(clusterScope.Namespace()), client.MatchingLabels{ - clusterv1.ClusterNameLabel: clusterScope.Name(), - }) - - if err != nil { - return nil, err - } - - return machineList.Items, nil -} - func (r *ProxmoxClusterReconciler) reconcileNormalCredentialsSecret(ctx context.Context, clusterScope *scope.ClusterScope) error { proxmoxCluster := clusterScope.ProxmoxCluster if !hasCredentialsRef(proxmoxCluster) { diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index a68aeb5b..c5c9c268 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -19,6 +19,7 @@ package vmservice import ( "context" + "slices" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -46,6 +47,9 @@ const ( optionMemory = "memory" ) +// ErrNoVMIDInRangeFree is returned if no free VMID is found in the specified vmIDRange. +var ErrNoVMIDInRangeFree = errors.New("No free vmid found in vmIDRange") + // ReconcileVM makes sure that the VM is in the desired state by: // 1. Creating the VM if it does not exist, then... // 2. Updating the VM with the bootstrap data, such as the cloud-init meta and user data, before... 
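// A sketch of the allocation strategy the next hunk wires into createVM: scan
// [start, end] linearly, skip IDs already claimed by ProxmoxMachines in this
// cluster, confirm the remainder against the Proxmox API, and surface
// ErrNoVMIDInRangeFree once the window is exhausted. checkID below is a
// hypothetical stand-in for the Client.CheckID call added in this patch;
// everything else mirrors getNextFreeVMIDfromRange further down.
//
//	func nextFreeVMID(start, end int64, used []int64, checkID func(int64) (bool, error)) (int64, error) {
//		for id := start; id <= end; id++ {
//			if slices.Contains(used, id) {
//				continue // claimed by a known ProxmoxMachine, possibly not yet visible in Proxmox
//			}
//			free, err := checkID(id)
//			if err != nil {
//				return 0, err
//			}
//			if free {
//				return id, nil
//			}
//		}
//		return 0, ErrNoVMIDInRangeFree
//	}
//
// Consulting local state first matters because a ProxmoxMachine may already
// own an ID whose VM the Proxmox API cannot report yet, e.g. while a clone is
// still in flight.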
@@ -319,10 +323,19 @@ func getMachineAddresses(scope *scope.MachineScope) ([]clusterv1.MachineAddress, } func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneResponse, error) { + vmid, err := getVMID(ctx, scope) + if err != nil { + if errors.Is(err, ErrNoVMIDInRangeFree) { + scope.SetFailureMessage(err) + scope.SetFailureReason(capierrors.InsufficientResourcesMachineError) + } + return proxmox.VMCloneResponse{}, err + } + options := proxmox.VMCloneRequest{ - Node: scope.ProxmoxMachine.GetNode(), - // NewID: 0, no need to provide newID - Name: scope.ProxmoxMachine.GetName(), + Node: scope.ProxmoxMachine.GetNode(), + NewID: int(vmid), + Name: scope.ProxmoxMachine.GetName(), } if scope.ProxmoxMachine.Spec.Description != nil { @@ -393,6 +406,53 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe return res, scope.InfraCluster.PatchObject() } +func getVMID(ctx context.Context, scope *scope.MachineScope) (int64, error) { + if scope.ProxmoxMachine.Spec.VMIDRange != nil { + vmIDRangeStart := scope.ProxmoxMachine.Spec.VMIDRange.Start + vmIDRangeEnd := scope.ProxmoxMachine.Spec.VMIDRange.End + if vmIDRangeStart != 0 && vmIDRangeEnd != 0 { + return getNextFreeVMIDfromRange(ctx, scope, vmIDRangeStart, vmIDRangeEnd) + } + } + // If VMIDRange is not defined, return 0 to let luthermonson/go-proxmox get the next free id. + return 0, nil +} + +func getNextFreeVMIDfromRange(ctx context.Context, scope *scope.MachineScope, vmIDRangeStart int64, vmIDRangeEnd int64) (int64, error) { + usedVMIDs, err := getUsedVMIDs(ctx, scope) + if err != nil { + return 0, err + } + // Get next free vmid from the range + for i := vmIDRangeStart; i <= vmIDRangeEnd; i++ { + if slices.Contains(usedVMIDs, i) { + continue + } + if vmidFree, err := scope.InfraCluster.ProxmoxClient.CheckID(ctx, i); err == nil && vmidFree { + return i, nil + } else if err != nil { + return 0, err + } + } + // Fail if we can't find a free vmid in the range. + return 0, ErrNoVMIDInRangeFree +} + +func getUsedVMIDs(ctx context.Context, scope *scope.MachineScope) ([]int64, error) { + // Get all used vmids from existing ProxmoxMachines + usedVMIDs := []int64{} + proxmoxMachines, err := scope.InfraCluster.ListProxmoxMachinesForCluster(ctx) + if err != nil { + return usedVMIDs, err + } + for _, proxmoxMachine := range proxmoxMachines { + if proxmoxMachine.GetVirtualMachineID() != -1 { + usedVMIDs = append(usedVMIDs, proxmoxMachine.GetVirtualMachineID()) + } + } + return usedVMIDs, nil +} + var selectNextNode = scheduler.ScheduleVM func unmountCloudInitISO(ctx context.Context, machineScope *scope.MachineScope) error { diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index 686abf98..001909ec 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -22,7 +22,10 @@ import ( "testing" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capierrors "sigs.k8s.io/cluster-api/errors" infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" @@ -181,6 +184,104 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing. 
require.True(t, machineScope.HasFailed()) } +func TestEnsureVirtualMachine_CreateVM_VMIDRange(t *testing.T) { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1alpha1.VMIDRange{ + Start: 1000, + End: 1002, + } + + expectedOptions := proxmox.VMCloneRequest{Node: "node1", NewID: 1001, Name: "test"} + response := proxmox.VMCloneResponse{Task: newTask(), NewID: int64(1001)} + proxmoxClient.Mock.On("CheckID", context.Background(), int64(1000)).Return(false, nil) + proxmoxClient.Mock.On("CheckID", context.Background(), int64(1001)).Return(true, nil) + proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() + + requeue, err := ensureVirtualMachine(context.Background(), machineScope) + require.NoError(t, err) + require.True(t, requeue) + + require.Equal(t, int64(1001), machineScope.ProxmoxMachine.GetVirtualMachineID()) + require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) +} + +func TestEnsureVirtualMachine_CreateVM_VMIDRangeExhausted(t *testing.T) { + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1alpha1.VMIDRange{ + Start: 1000, + End: 1002, + } + + proxmoxClient.Mock.On("CheckID", context.Background(), int64(1000)).Return(false, nil) + proxmoxClient.Mock.On("CheckID", context.Background(), int64(1001)).Return(false, nil) + proxmoxClient.Mock.On("CheckID", context.Background(), int64(1002)).Return(false, nil) + + requeue, err := ensureVirtualMachine(context.Background(), machineScope) + require.Error(t, err, ErrNoVMIDInRangeFree) + require.False(t, requeue) + require.Equal(t, int64(-1), machineScope.ProxmoxMachine.GetVirtualMachineID()) +} + +func TestEnsureVirtualMachine_CreateVM_VMIDRangeCheckExisting(t *testing.T) { + machineScope, proxmoxClient, kubeClient := setupReconcilerTest(t) + machineScope.ProxmoxMachine.Spec.VMIDRange = &infrav1alpha1.VMIDRange{ + Start: 1000, + End: 1002, + } + + // Add a VM with ID 1000. + // Make sure the check for a free vmid skips 1000 by ensuring the Proxmox CheckID function isn't called more than once. + // It is called once when reconciling this test vm. 
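+ // For the machine under test this means: 1000 is skipped purely from the
+ // usedVMIDs gathered off the existing ProxmoxMachine (the .Once() mock below
+ // would fail on a second CheckID(1000) call), 1001 is reported taken by
+ // Proxmox, and 1002 is the ID handed to CloneVM.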
+ vm := newRunningVM() + vm.Name = "vm1000" + proxmoxClient.EXPECT().GetVM(context.Background(), "", int64(1000)).Return(vm, nil).Once() + proxmoxClient.Mock.On("CheckID", context.Background(), int64(1000)).Return(false, nil).Once() + infraMachine := infrav1alpha1.ProxmoxMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vm1000", + }, + Spec: infrav1alpha1.ProxmoxMachineSpec{ + VirtualMachineID: ptr.To(int64(1000)), + }, + } + machine := clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vm1000", + }, + Spec: clusterv1.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + Kind: "ProxmoxMachine", + Name: "vm1000", + }, + }, + } + machineScopeVMThousand, err := scope.NewMachineScope(scope.MachineScopeParams{ + Client: kubeClient, + Logger: machineScope.Logger, + Cluster: machineScope.Cluster, + Machine: &machine, + InfraCluster: machineScope.InfraCluster, + ProxmoxMachine: &infraMachine, + IPAMHelper: machineScope.IPAMHelper, + }) + require.NoError(t, err) + machineScopeVMThousand.SetVirtualMachineID(1000) + _, err = ensureVirtualMachine(context.Background(), machineScopeVMThousand) + require.NoError(t, err) + + expectedOptions := proxmox.VMCloneRequest{Node: "node1", NewID: 1002, Name: "test"} + response := proxmox.VMCloneResponse{Task: newTask(), NewID: int64(1002)} + proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once() + proxmoxClient.Mock.On("CheckID", context.Background(), int64(1001)).Return(false, nil).Once() + proxmoxClient.Mock.On("CheckID", context.Background(), int64(1002)).Return(true, nil).Once() + + requeue, err := ensureVirtualMachine(context.Background(), machineScope) + require.NoError(t, err) + require.True(t, requeue) + require.Equal(t, int64(1002), machineScope.ProxmoxMachine.GetVirtualMachineID()) +} + func TestEnsureVirtualMachine_FindVM(t *testing.T) { machineScope, proxmoxClient, _ := setupReconcilerTest(t) machineScope.SetVirtualMachineID(123) diff --git a/pkg/proxmox/client.go b/pkg/proxmox/client.go index 4c5f9431..5fcf0e6f 100644 --- a/pkg/proxmox/client.go +++ b/pkg/proxmox/client.go @@ -31,6 +31,8 @@ type Client interface { FindVMResource(ctx context.Context, vmID uint64) (*proxmox.ClusterResource, error) + CheckID(ctx context.Context, vmID int64) (bool, error) + GetVM(ctx context.Context, nodeName string, vmID int64) (*proxmox.VirtualMachine, error) DeleteVM(ctx context.Context, nodeName string, vmID int64) (*proxmox.Task, error) diff --git a/pkg/proxmox/goproxmox/api_client.go b/pkg/proxmox/goproxmox/api_client.go index e9ecef4a..23150053 100644 --- a/pkg/proxmox/goproxmox/api_client.go +++ b/pkg/proxmox/goproxmox/api_client.go @@ -184,6 +184,16 @@ func (c *APIClient) DeleteVM(ctx context.Context, nodeName string, vmID int64) ( return task, nil } +// CheckID checks if the vmid is available on the cluster. +// Returns true if the vmid is available, false if it is taken. +func (c *APIClient) CheckID(ctx context.Context, vmid int64) (bool, error) { + cluster, err := c.Cluster(ctx) + if err != nil { + return false, fmt.Errorf("cannot get cluster") + } + return cluster.CheckID(ctx, int(vmid)) +} + // GetTask returns a task associated with upID. 
func (c *APIClient) GetTask(ctx context.Context, upID string) (*proxmox.Task, error) { task := proxmox.NewTask(proxmox.UPID(upID), c.Client) diff --git a/pkg/proxmox/proxmoxtest/mock_client.go b/pkg/proxmox/proxmoxtest/mock_client.go index 8492503f..a488fed2 100644 --- a/pkg/proxmox/proxmoxtest/mock_client.go +++ b/pkg/proxmox/proxmoxtest/mock_client.go @@ -24,6 +24,59 @@ func (_m *MockClient) EXPECT() *MockClient_Expecter { return &MockClient_Expecter{mock: &_m.Mock} } +// CheckID provides a mock function with given fields: ctx, vmID +func (_m *MockClient) CheckID(ctx context.Context, vmID int64) (bool, error) { + ret := _m.Called(ctx, vmID) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int64) (bool, error)); ok { + return rf(ctx, vmID) + } + if rf, ok := ret.Get(0).(func(context.Context, int64) bool); ok { + r0 = rf(ctx, vmID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok { + r1 = rf(ctx, vmID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockClient_CheckID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckID' +type MockClient_CheckID_Call struct { + *mock.Call +} + +// CheckID is a helper method to define mock.On call +// - ctx context.Context +// - vmID int64 +func (_e *MockClient_Expecter) CheckID(ctx interface{}, vmID interface{}) *MockClient_CheckID_Call { + return &MockClient_CheckID_Call{Call: _e.mock.On("CheckID", ctx, vmID)} +} + +func (_c *MockClient_CheckID_Call) Run(run func(ctx context.Context, vmID int64)) *MockClient_CheckID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int64)) + }) + return _c +} + +func (_c *MockClient_CheckID_Call) Return(_a0 bool, _a1 error) *MockClient_CheckID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockClient_CheckID_Call) RunAndReturn(run func(context.Context, int64) (bool, error)) *MockClient_CheckID_Call { + _c.Call.Return(run) + return _c +} + // CloneVM provides a mock function with given fields: ctx, templateID, clone func (_m *MockClient) CloneVM(ctx context.Context, templateID int, clone proxmox.VMCloneRequest) (proxmox.VMCloneResponse, error) { ret := _m.Called(ctx, templateID, clone) diff --git a/pkg/scope/cluster.go b/pkg/scope/cluster.go index ef8a0bd3..c6c8187b 100644 --- a/pkg/scope/cluster.go +++ b/pkg/scope/cluster.go @@ -196,6 +196,20 @@ func (s *ClusterScope) PatchObject() error { return s.patchHelper.Patch(context.TODO(), s.ProxmoxCluster) } +// ListProxmoxMachinesForCluster returns all the ProxmoxMachines that belong to this cluster. +func (s *ClusterScope) ListProxmoxMachinesForCluster(ctx context.Context) ([]infrav1alpha1.ProxmoxMachine, error) { + var machineList infrav1alpha1.ProxmoxMachineList + + err := s.client.List(ctx, &machineList, client.InNamespace(s.Namespace()), client.MatchingLabels{ + clusterv1.ClusterNameLabel: s.Name(), + }) + if err != nil { + return nil, err + } + + return machineList.Items, nil +} + // Close closes the current scope persisting the cluster configuration and status. 
func (s *ClusterScope) Close() error { return s.PatchObject() diff --git a/pkg/scope/cluster_test.go b/pkg/scope/cluster_test.go index 5d8f982a..1a20902c 100644 --- a/pkg/scope/cluster_test.go +++ b/pkg/scope/cluster_test.go @@ -34,6 +34,7 @@ import ( infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam" "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/goproxmox" + "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/proxmoxtest" ) func TestNewClusterScope_MissingParams(t *testing.T) { @@ -135,6 +136,110 @@ func TestNewClusterScope_SetupProxmoxClient(t *testing.T) { require.Error(t, err) } +func TestListProxmoxMachinesForCluster(t *testing.T) { + k8sClient := getFakeClient(t) + proxmoxClient := proxmoxtest.NewMockClient(t) + + cluster := &clusterv1.Cluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "proxmoxcluster", + Namespace: "default", + }, + } + err := k8sClient.Create(context.Background(), cluster) + require.NoError(t, err) + + proxmoxCluster := &infrav1alpha1.ProxmoxCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: infrav1alpha1.GroupVersion.String(), + Kind: "ProxmoxCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "proxmoxcluster", + Namespace: "default", + }, + Spec: infrav1alpha1.ProxmoxClusterSpec{ + AllowedNodes: []string{"pve", "pve-2"}, + CredentialsRef: &corev1.SecretReference{ + Name: "test-secret", + Namespace: "default", + }, + }, + } + err = k8sClient.Create(context.Background(), proxmoxCluster) + require.NoError(t, err) + + creds := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "default", + }, + StringData: map[string]string{ + "url": "https://localhost:8006", + "token": "test-token", + "secret": "test-secret", + }, + } + + err = k8sClient.Create(context.Background(), &creds) + require.NoError(t, err) + + params := ClusterScopeParams{Client: k8sClient, Cluster: cluster, ProxmoxCluster: proxmoxCluster, ProxmoxClient: proxmoxClient, IPAMHelper: &ipam.Helper{}} + clusterScope, err := NewClusterScope(params) + require.NoError(t, err) + + expectedMachineList := &infrav1alpha1.ProxmoxMachineList{ + Items: []infrav1alpha1.ProxmoxMachine{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "machine01", + Namespace: "default", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: clusterScope.Name(), + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "machine02", + Namespace: "default", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: clusterScope.Name(), + }, + }, + }, + }, + } + + for machineIdx := range expectedMachineList.Items { + err = k8sClient.Create(context.Background(), &expectedMachineList.Items[machineIdx]) + require.NoError(t, err) + // As the k8sClient sets ResourceVersion to 1, we also set it in the expectedMachineList. 
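+ // (controller-runtime's fake client stamps freshly created objects with
+ // ResourceVersion "1"; without mirroring that here, the require.Equal against
+ // the listed machines at the end of this test would fail.)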
+ expectedMachineList.Items[machineIdx].ResourceVersion = "1" + } + + unexpectedMachine := &infrav1alpha1.ProxmoxMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "other-cluster-machine01", + Namespace: "default", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "other-cluster", + }, + }, + } + err = k8sClient.Create(context.Background(), unexpectedMachine) + require.NoError(t, err) + + machines, err := clusterScope.ListProxmoxMachinesForCluster(context.Background()) + + require.NoError(t, err) + require.Equal(t, expectedMachineList.Items, machines) +} + func getFakeClient(t *testing.T) ctrlclient.Client { scheme := runtime.NewScheme()