From 326c8ebb09ed853a6f1dcefac4634ce226e538e7 Mon Sep 17 00:00:00 2001
From: Vasil Atanasov <141020316+vasilsatanasov@users.noreply.github.com>
Date: Thu, 12 Oct 2023 20:42:56 +0300
Subject: [PATCH] Fixing cloning regression on DS cluster (#2037)

The issue is that, when a VM is cloned on a DS cluster, the
datastore_id field in the relocate spec holds an invalid value, which
causes vCenter to throw an error.

The fix restores the previously implemented behavior: do not send
relocate specs for the VM hard disks when the VM is cloned on a DS
cluster, except when datastore_id is explicitly specified for the hard
disk.

Added TestAccResourceVSphereVirtualMachine_cloneOnDsCluster to cover
this case.

Fixes #2034

Signed-off-by: Vasil Atanasov
---
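Notes (after the three-dash line, so not part of the commit message):

A minimal configuration that reproduces the regression is sketched
below. The sketch is illustrative only; the data source names and
values are placeholders, not part of this change. The clone is placed
through datastore_cluster_id and sets no datastore_id at either the VM
or the disk level, which is exactly the case that used to produce the
invalid relocate spec:

    resource "vsphere_virtual_machine" "clone" {
      name                 = "cloned-vm"
      resource_pool_id     = data.vsphere_compute_cluster.cluster.resource_pool_id
      # Placement is delegated to the datastore cluster; no datastore_id is set.
      datastore_cluster_id = data.vsphere_datastore_cluster.ds_cluster.id

      num_cpus = 2
      memory   = 2048
      guest_id = "other3xLinuxGuest"

      network_interface {
        network_id = data.vsphere_network.network.id
      }

      # No per-disk datastore_id either: with this fix no relocate spec is
      # sent for this disk, so the datastore cluster placement applies.
      disk {
        label = "disk0"
        size  = 4
      }

      clone {
        template_uuid = data.vsphere_virtual_machine.template.id
      }
    }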
 vsphere/internal/helper/testhelper/testing.go |  10 ++
 .../virtual_machine_disk_subresource.go       |  26 ++++-
 .../resource_vsphere_virtual_machine_test.go  |  97 +++++++++++++++++++
 3 files changed, 128 insertions(+), 5 deletions(-)

diff --git a/vsphere/internal/helper/testhelper/testing.go b/vsphere/internal/helper/testhelper/testing.go
index 0bb6221a8..21a3bd8aa 100644
--- a/vsphere/internal/helper/testhelper/testing.go
+++ b/vsphere/internal/helper/testhelper/testing.go
@@ -204,3 +204,13 @@ func ConfigDataRootVMNet() string {
 }
 `
 }
+
+func ConfigDSClusterData() string {
+	return fmt.Sprintf(`
+  data "vsphere_datastore_cluster" "ds_cluster1" {
+    name          = "%s"
+    datacenter_id = data.vsphere_datacenter.rootdc1.id
+  }
+
+`, os.Getenv("TF_VAR_VSPHERE_DS_CLUSTER1"))
+}
diff --git a/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go b/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
index 0c78d09bd..b36048999 100644
--- a/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
+++ b/vsphere/internal/virtualdevice/virtual_machine_disk_subresource.go
@@ -972,6 +972,14 @@ func shouldAddRelocateSpec(d *schema.ResourceData, disk *types.VirtualDisk, sche
 	diskProps := virtualDiskToSchemaPropsMap(disk)
 	dataProps := diskDataToSchemaProps(d, schemaDiskIndex)
 
+	// If the VM is cloned to a datastore cluster and no datastore is specified
+	// for the disk, it should not be added to the relocate spec.
+	diskDataStoreId, _ := dataProps["datastore_id"].(string)
+	diskDsIsEmpty := diskDataStoreId == "" || diskDataStoreId == diskDatastoreComputedName
+	if d.Get("datastore_id") == "" && d.Get("datastore_cluster_id") != "" && diskDsIsEmpty {
+		return false
+	}
+
 	for _, key := range relocateProperties {
 		dataProp, dataPropOk := dataProps[key]
 		diskProp, diskPropOk := diskProps[key]
@@ -1024,23 +1032,31 @@ func virtualDiskToSchemaPropsMap(disk *types.VirtualDisk) map[string]interface{}
 func diskDataToSchemaProps(d *schema.ResourceData, deviceIndex int) map[string]interface{} {
 	m := make(map[string]interface{})
 
-	diskKey := fmt.Sprintf("disk.%d.datastore_id", deviceIndex)
-	if datastoreId, ok := d.GetOk(diskKey); ok {
+	datastoreKey := fmt.Sprintf("disk.%d.datastore_id", deviceIndex)
+	if datastoreId, ok := d.GetOk(datastoreKey); ok {
 		m["datastore_id"] = datastoreId
 	}
 
-	if diskMode, ok := d.GetOk(diskKey); ok {
+	diskModeKey := fmt.Sprintf("disk.%d.disk_mode", deviceIndex)
+	if diskMode, ok := d.GetOk(diskModeKey); ok {
 		m["disk_mode"] = diskMode
 	}
 
-	if eagerlyScrub, ok := d.GetOk(diskKey); ok {
+	eagerlyScrubKey := fmt.Sprintf("disk.%d.eagerly_scrub", deviceIndex)
+	if eagerlyScrub, ok := d.GetOk(eagerlyScrubKey); ok {
 		m["eagerly_scrub"] = eagerlyScrub
 	}
 
-	if thinProvisioned, ok := d.GetOk(diskKey); ok {
+	thinProvisionedKey := fmt.Sprintf("disk.%d.thin_provisioned", deviceIndex)
+	if thinProvisioned, ok := d.GetOk(thinProvisionedKey); ok {
 		m["thin_provisioned"] = thinProvisioned
 	}
 
+	writeThroughKey := fmt.Sprintf("disk.%d.write_through", deviceIndex)
+	if writeThrough, ok := d.GetOk(writeThroughKey); ok {
+		m["write_through"] = writeThrough
+	}
+
 	return m
 }
diff --git a/vsphere/resource_vsphere_virtual_machine_test.go b/vsphere/resource_vsphere_virtual_machine_test.go
index 97ff7b158..05ec04ff1 100644
--- a/vsphere/resource_vsphere_virtual_machine_test.go
+++ b/vsphere/resource_vsphere_virtual_machine_test.go
@@ -1849,6 +1849,27 @@ func TestAccResourceVSphereVirtualMachine_cloneWithDiskTypeChange(t *testing.T)
 	})
 }
 
+func TestAccResourceVSphereVirtualMachine_cloneOnDsCluster(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck: func() {
+			RunSweepers()
+			testAccPreCheck(t)
+			testAccResourceVSphereVirtualMachinePreCheck(t)
+			testAccDsClusterRequiredPreCheck(t)
+		},
+		Providers:    testAccProviders,
+		CheckDestroy: testAccResourceVSphereVirtualMachineCheckExists(false),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccResourceVSphereVirtualMachineConfigCloneChangedDiskTypeDsCluster(),
+				Check: resource.ComposeTestCheckFunc(
+					testAccResourceVSphereVirtualMachineCheckExists(true),
+				),
+			},
+		},
+	})
+}
+
 func TestAccResourceVSphereVirtualMachine_cpuHotAdd(t *testing.T) {
 	resource.Test(t, resource.TestCase{
 		PreCheck: func() {
@@ -2541,6 +2562,12 @@ func testAccResourceVSphereVirtualMachinePreCheck(t *testing.T) {
 	}
 }
 
+func testAccDsClusterRequiredPreCheck(t *testing.T) {
+	if os.Getenv("TF_VAR_VSPHERE_DS_CLUSTER1") == "" {
+		t.Skip("TF_VAR_VSPHERE_DS_CLUSTER1 must be set with the name of a DS cluster in order to run tests which require a DS cluster")
+	}
+}
+
 func testAccResourceVSphereVirtualMachineCheckExists(expected bool) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		_, err := testGetVirtualMachine(s, "vm")
@@ -5768,6 +5795,76 @@ resource "vsphere_virtual_machine" "vm" {
 	)
 }
 
+func testAccResourceVSphereVirtualMachineConfigCloneChangedDiskTypeDsCluster() string {
+	return fmt.Sprintf(`
+%s
+
+data "vsphere_network" "network" {
+  name          = "VM Network"
+  datacenter_id = data.vsphere_datacenter.rootdc1.id
+}
+
+resource "vsphere_virtual_machine" "template" {
+  name             = "vm-1-template"
+  resource_pool_id = data.vsphere_compute_cluster.rootcompute_cluster1.resource_pool_id
+  datastore_id     = data.vsphere_datastore.rootds1.id
+
+  num_cpus = 2
+  memory   = 2048
+  guest_id = "other3xLinuxGuest"
+
+  network_interface {
+    network_id = data.vsphere_network.network.id
+  }
+
+  wait_for_guest_ip_timeout  = 0
+  wait_for_guest_net_timeout = 0
+
+  disk {
+    label = "disk0"
+    size  = 4
+  }
+}
+
+resource "vsphere_virtual_machine" "vm" {
+  name             = "vm-1-template-clone"
+  resource_pool_id = data.vsphere_compute_cluster.rootcompute_cluster1.resource_pool_id
+  guest_id         = vsphere_virtual_machine.template.guest_id
+
+  network_interface {
+    network_id = data.vsphere_network.network.id
+  }
+
+  datastore_cluster_id = data.vsphere_datastore_cluster.ds_cluster1.id
+
+  num_cpus = 2
+  memory   = 2048
+
+  scsi_type                  = vsphere_virtual_machine.template.scsi_type
+  wait_for_guest_ip_timeout  = 0
+  wait_for_guest_net_timeout = 0
+
+  disk {
+    label = "disk0"
+    size  = vsphere_virtual_machine.template.disk.0.size
+  }
+
+  clone {
+    template_uuid = vsphere_virtual_machine.template.id
+  }
+}
+
+`,
+		testhelper.CombineConfigs(
+			testhelper.ConfigDataRootDC1(),
+			testhelper.ConfigDataRootComputeCluster1(),
+			testhelper.ConfigDataRootDS1(),
+			testhelper.ConfigDSClusterData(),
+		),
+	)
+}
+
 func testAccResourceVSphereVirtualMachineConfigWithHotAdd(nc, nm int, cha, chr, mha bool) string {
 	return fmt.Sprintf(`
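A note on verification: acceptance tests in this suite run against a
live vCenter only when TF_ACC is set, and the new test additionally
skips itself unless TF_VAR_VSPHERE_DS_CLUSTER1 names an existing
datastore cluster. An invocation along the following lines should
exercise it (the endpoint and credential variables required by the rest
of the suite are assumed to be set and are omitted here):

    TF_ACC=1 TF_VAR_VSPHERE_DS_CLUSTER1=<ds-cluster-name> \
      go test ./vsphere -v \
      -run TestAccResourceVSphereVirtualMachine_cloneOnDsCluster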