diff --git a/.github/workflows/acceptance-tests.yaml b/.github/workflows/acceptance-tests.yaml index bf19523ee..063a6d545 100644 --- a/.github/workflows/acceptance-tests.yaml +++ b/.github/workflows/acceptance-tests.yaml @@ -42,7 +42,7 @@ jobs: echo "VSPHERE_REST_SESSION_PATH=$(pwd)/rest_sessions" >> $GITHUB_ENV - name: Set up Terraform - uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 # v2.0.3 + uses: hashicorp/setup-terraform@a1502cd9e758c50496cc9ac5308c4843bcd56d36 # v3.0.0 with: terraform_wrapper: false @@ -63,7 +63,7 @@ jobs: go-version-file: '.go-version' - name: Set up Node - uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1 + uses: actions/setup-node@8f152de45cc393bb48ce5d89d36b731f54556e65 # v4.0.0 with: node-version: 18 diff --git a/.github/workflows/issue_greeting.yml b/.github/workflows/issue_greeting.yml index d6cfae06d..851f12487 100644 --- a/.github/workflows/issue_greeting.yml +++ b/.github/workflows/issue_greeting.yml @@ -22,7 +22,7 @@ jobs: vars: | author: ${{ github.actor }} - name: Create Comment - uses: peter-evans/create-or-update-comment@c6c9a1a66007646a28c153e2a8580a5bad27bcfa # v3.0.2 + uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0 with: issue-number: '${{ github.event.issue.number }}' body: '${{ steps.template.outputs.result }}' diff --git a/CHANGELOG.md b/CHANGELOG.md index 978d1055b..2dfd8923f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,9 +2,14 @@ ## 2.6.0 (Unreleased) +BUG FIXES: + +* `resource/virtual_machine`: Resolves upload error when deploying an OVF/OVA directly to an ESXi host. ([#1813](https://github.com/terraform-providers/terraform-provider-vsphere/pull/1813)) + FEATURES: * `resource/compute_cluster`: Adds support for vSAN Express Storage Architecture in vSphere 8.0. ([#1874](https://github.com/terraform-providers/terraform-provider-vsphere/pull/1874)) +* `resource/compute_cluster`: Adds support for vSAN stretched clusters. 
([#1885](https://github.com/hashicorp/terraform-provider-vsphere/pull/1885/)) ## 2.5.1 (October 12, 2023) diff --git a/go.mod b/go.mod index 92aab9217..707a28e74 100644 --- a/go.mod +++ b/go.mod @@ -55,6 +55,6 @@ require ( golang.org/x/text v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/grpc v1.57.0 // indirect + google.golang.org/grpc v1.57.1 // indirect google.golang.org/protobuf v1.31.0 // indirect ) diff --git a/go.sum b/go.sum index 0e1b0ca8a..430339f80 100644 --- a/go.sum +++ b/go.sum @@ -195,8 +195,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.57.1 h1:upNTNqv0ES+2ZOOqACwVtS3Il8M12/+Hz41RCPzAjQg= +google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= diff --git a/vsphere/internal/helper/ovfdeploy/ovf_helper.go b/vsphere/internal/helper/ovfdeploy/ovf_helper.go index 719455a77..b6f78f923 100644 --- a/vsphere/internal/helper/ovfdeploy/ovf_helper.go +++ b/vsphere/internal/helper/ovfdeploy/ovf_helper.go @@ -49,7 +49,7 @@ func (pr *ProgressReader) Read(p []byte) (n int, err error) { return } -func DeployOvfAndGetResult(ovfCreateImportSpecResult *types.OvfCreateImportSpecResult, resourcePoolObj *object.ResourcePool, +func DeployOvfAndGetResult(client *govmomi.Client, ovfCreateImportSpecResult *types.OvfCreateImportSpecResult, resourcePoolObj *object.ResourcePool, folder *object.Folder, host *object.HostSystem, filePath string, deployOva bool, fromLocal bool, allowUnverifiedSSL bool) error { var currBytesRead int64 @@ -100,15 +100,15 @@ func DeployOvfAndGetResult(ovfCreateImportSpecResult *types.OvfCreateImportSpecR } if !deployOva { if fromLocal { - err = uploadDisksFromLocal(filePath, ovfFileItem, deviceObj, &currBytesRead) + err = uploadDisksFromLocal(client, filePath, ovfFileItem, deviceObj, &currBytesRead) } else { - err = uploadDisksFromURL(filePath, ovfFileItem, deviceObj, &currBytesRead, allowUnverifiedSSL) + err = uploadDisksFromURL(client, filePath, ovfFileItem, deviceObj, &currBytesRead, allowUnverifiedSSL) } } else { if fromLocal { - err = uploadOvaDisksFromLocal(filePath, ovfFileItem, deviceObj, &currBytesRead) + err = uploadOvaDisksFromLocal(client, filePath, ovfFileItem, deviceObj, &currBytesRead) } else { - err = uploadOvaDisksFromURL(filePath, ovfFileItem, deviceObj, &currBytesRead, allowUnverifiedSSL) + err = uploadOvaDisksFromURL(client, filePath, ovfFileItem, deviceObj, &currBytesRead, allowUnverifiedSSL) } } if err != nil { @@ -125,12 +125,13 @@ func DeployOvfAndGetResult(ovfCreateImportSpecResult *types.OvfCreateImportSpecR return nfcLease.Complete(context.Background()) } -func 
upload(ctx context.Context, item types.OvfFileItem, f io.Reader, url string, size int64, totalBytesRead *int64) error { - u, err := soap.ParseURL(url) +func upload(ctx context.Context, client *govmomi.Client, item types.OvfFileItem, f io.Reader, rawUrl string, size int64, totalBytesRead *int64) error { + u, err := client.Client.ParseURL(rawUrl) if err != nil { return err } - c := soap.NewClient(u, true) + url := u.String() + c := client.Client.Client param := soap.Upload{ ContentLength: size, @@ -185,7 +186,7 @@ func upload(ctx context.Context, item types.OvfFileItem, f io.Reader, url string return err } -func uploadDisksFromLocal(filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { +func uploadDisksFromLocal(client *govmomi.Client, filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { absoluteFilePath := "" if strings.Contains(filePath, string(os.PathSeparator)) { absoluteFilePath = string(filePath[0 : strings.LastIndex(filePath, string(os.PathSeparator))+1]) @@ -196,7 +197,7 @@ func uploadDisksFromLocal(filePath string, ovfFileItem types.OvfFileItem, device if err != nil { return err } - err = upload(context.Background(), ovfFileItem, file, deviceObj.Url, ovfFileItem.Size, currBytesRead) + err = upload(context.Background(), client, ovfFileItem, file, deviceObj.Url, ovfFileItem.Size, currBytesRead) if err != nil { return fmt.Errorf("error while uploading the file %s %s", vmdkFilePath, err) } @@ -207,15 +208,15 @@ func uploadDisksFromLocal(filePath string, ovfFileItem types.OvfFileItem, device return nil } -func uploadDisksFromURL(filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64, +func uploadDisksFromURL(client *govmomi.Client, filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64, allowUnverifiedSSL bool) error { absoluteFilePath := "" if strings.Contains(filePath, "/") { absoluteFilePath = string(filePath[0 : strings.LastIndex(filePath, "/")+1]) } vmdkFilePath := absoluteFilePath + ovfFileItem.Path - client := getClient(allowUnverifiedSSL) - resp, err := client.Get(vmdkFilePath) + httpClient := getClient(allowUnverifiedSSL) + resp, err := httpClient.Get(vmdkFilePath) log.Print(" [DEBUG] Absolute vmdk path: " + vmdkFilePath) if err != nil { return err @@ -223,11 +224,11 @@ func uploadDisksFromURL(filePath string, ovfFileItem types.OvfFileItem, deviceOb defer func(Body io.ReadCloser) { _ = Body.Close() }(resp.Body) - err = upload(context.Background(), ovfFileItem, resp.Body, deviceObj.Url, ovfFileItem.Size, currBytesRead) + err = upload(context.Background(), client, ovfFileItem, resp.Body, deviceObj.Url, ovfFileItem.Size, currBytesRead) return err } -func uploadOvaDisksFromLocal(filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { +func uploadOvaDisksFromLocal(client *govmomi.Client, filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { diskName := ovfFileItem.Path ovaFile, err := os.Open(filePath) if err != nil { @@ -237,15 +238,15 @@ func uploadOvaDisksFromLocal(filePath string, ovfFileItem types.OvfFileItem, dev _ = ovaFile.Close() }(ovaFile) - err = findAndUploadDiskFromOva(ovaFile, diskName, ovfFileItem, deviceObj, currBytesRead) + err = findAndUploadDiskFromOva(client, ovaFile, diskName, ovfFileItem, deviceObj, 
currBytesRead) return err } -func uploadOvaDisksFromURL(filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64, +func uploadOvaDisksFromURL(client *govmomi.Client, filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64, allowUnverifiedSSL bool) error { diskName := ovfFileItem.Path - client := getClient(allowUnverifiedSSL) - resp, err := client.Get(filePath) + httpClient := getClient(allowUnverifiedSSL) + resp, err := httpClient.Get(filePath) if err != nil { return err } @@ -253,7 +254,7 @@ func uploadOvaDisksFromURL(filePath string, ovfFileItem types.OvfFileItem, devic _ = Body.Close() }(resp.Body) if resp.StatusCode == http.StatusOK { - err = findAndUploadDiskFromOva(resp.Body, diskName, ovfFileItem, deviceObj, currBytesRead) + err = findAndUploadDiskFromOva(client, resp.Body, diskName, ovfFileItem, deviceObj, currBytesRead) if err != nil { return err } @@ -345,7 +346,7 @@ func getOvfDescriptorFromOva(ovaFile io.Reader) (string, error) { return "", fmt.Errorf("ovf file not found inside the ova") } -func findAndUploadDiskFromOva(ovaFile io.Reader, diskName string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { +func findAndUploadDiskFromOva(client *govmomi.Client, ovaFile io.Reader, diskName string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { ovaReader := tar.NewReader(ovaFile) for { fileHdr, err := ovaReader.Next() @@ -356,7 +357,7 @@ func findAndUploadDiskFromOva(ovaFile io.Reader, diskName string, ovfFileItem ty return err } if fileHdr.Name == diskName { - err = upload(context.Background(), ovfFileItem, ovaReader, deviceObj.Url, ovfFileItem.Size, currBytesRead) + err = upload(context.Background(), client, ovfFileItem, ovaReader, deviceObj.Url, ovfFileItem.Size, currBytesRead) if err != nil { return fmt.Errorf("error while uploading the file %s %s", diskName, err) } @@ -557,7 +558,7 @@ func (o *OvfHelper) GetImportSpec(client *govmomi.Client) (*types.OvfCreateImpor return is, nil } -func (o *OvfHelper) DeployOvf(spec *types.OvfCreateImportSpecResult) error { - return DeployOvfAndGetResult(spec, o.ResourcePool, o.Folder, o.HostSystem, +func (o *OvfHelper) DeployOvf(client *govmomi.Client, spec *types.OvfCreateImportSpecResult) error { + return DeployOvfAndGetResult(client, spec, o.ResourcePool, o.Folder, o.HostSystem, o.FilePath, o.DeployOva, o.IsLocal, o.AllowUnverifiedSSL) } diff --git a/vsphere/internal/helper/vsanclient/vsan_client_helper.go b/vsphere/internal/helper/vsanclient/vsan_client_helper.go index a2f792e54..3903d21c3 100644 --- a/vsphere/internal/helper/vsanclient/vsan_client_helper.go +++ b/vsphere/internal/helper/vsanclient/vsan_client_helper.go @@ -7,8 +7,11 @@ import ( "context" "github.com/hashicorp/terraform-provider-vsphere/vsphere/internal/helper/provider" + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/object" vimtypes "github.com/vmware/govmomi/vim25/types" "github.com/vmware/govmomi/vsan" + "github.com/vmware/govmomi/vsan/methods" vsantypes "github.com/vmware/govmomi/vsan/types" ) @@ -30,3 +33,49 @@ func GetVsanConfig(vsanClient *vsan.Client, cluster vimtypes.ManagedObjectRefere return vsanConfig, err } + +func ConvertToStretchedCluster(vsanClient *vsan.Client, client *govmomi.Client, req vsantypes.VSANVcConvertToStretchedCluster) error { + ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout) + defer cancel() + 
+	res, err := methods.VSANVcConvertToStretchedCluster(ctx, vsanClient, &req)
+
+	if err != nil {
+		return err
+	}
+
+	task := object.NewTask(client.Client, res.Returnval)
+	return task.Wait(ctx)
+}
+
+// RemoveWitnessHost removes the witness host; this automatically disables the stretched cluster.
+func RemoveWitnessHost(vsanClient *vsan.Client, client *govmomi.Client, req vsantypes.VSANVcRemoveWitnessHost) error {
+	ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout)
+	defer cancel()
+
+	res, err := methods.VSANVcRemoveWitnessHost(ctx, vsanClient, &req)
+
+	if err != nil {
+		return err
+	}
+
+	task := object.NewTask(client.Client, res.Returnval)
+	return task.Wait(ctx)
+}
+
+func GetWitnessHosts(vsanClient *vsan.Client, cluster vimtypes.ManagedObjectReference) (*vsantypes.VSANVcGetWitnessHostsResponse, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout)
+	defer cancel()
+
+	req := vsantypes.VSANVcGetWitnessHosts{
+		This:    vsan.VsanVcStretchedClusterSystem,
+		Cluster: cluster.Reference(),
+	}
+
+	res, err := methods.VSANVcGetWitnessHosts(ctx, vsanClient, &req)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, err
+}
diff --git a/vsphere/resource_vsphere_compute_cluster.go b/vsphere/resource_vsphere_compute_cluster.go
index fd340c3f0..6f499e889 100644
--- a/vsphere/resource_vsphere_compute_cluster.go
+++ b/vsphere/resource_vsphere_compute_cluster.go
@@ -28,6 +28,7 @@ import (
 	"github.com/vmware/govmomi"
 	"github.com/vmware/govmomi/object"
 	"github.com/vmware/govmomi/vim25/types"
+	"github.com/vmware/govmomi/vsan"
 	vsantypes "github.com/vmware/govmomi/vsan/types"
 )
 
@@ -609,6 +610,41 @@ func resourceVSphereComputeCluster() *schema.Resource {
 					},
 				},
 			},
+			"vsan_stretched_cluster": {
+				Type:        schema.TypeList,
+				MaxItems:    1,
+				Optional:    true,
+				Description: "The configuration for the vSAN stretched cluster.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"preferred_fault_domain_host_ids": {
+							Type:        schema.TypeSet,
+							Required:    true,
+							Description: "The managed object IDs of the hosts to put in the first fault domain.",
+							Elem:        &schema.Schema{Type: schema.TypeString},
+						},
+						"secondary_fault_domain_host_ids": {
+							Type:        schema.TypeSet,
+							Required:    true,
+							Description: "The managed object IDs of the hosts to put in the second fault domain.",
+							Elem:        &schema.Schema{Type: schema.TypeString},
+						},
+						"witness_node": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "The managed object ID of the host selected as the witness node when enabling a stretched cluster.",
+						},
+						"preferred_fault_domain_name": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "The name of the preferred fault domain.",
+							Default:     "Preferred",
+						},
+						"secondary_fault_domain_name": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: "The name of the secondary fault domain.",
+							Default:     "Secondary",
 						},
 					},
 				},
@@ -1439,6 +1475,10 @@ func resourceVSphereComputeClusterFlattenData(
 		return err
 	}
 
+	if err := flattenVsanStretchedCluster(meta.(*Client).vsanClient, d, cluster, props.ConfigurationEx.(*types.ClusterConfigInfoEx)); err != nil {
+		return err
+	}
+
 	return flattenClusterConfigSpecEx(d, props.ConfigurationEx.(*types.ClusterConfigInfoEx), version)
 }
 
@@ -1582,6 +1622,59 @@ func expandVsanDatastoreConfig(d *schema.ResourceData, meta interface{}) (*vsant
 	return conf, nil
 }
 
+func buildVsanStretchedClusterReq(d *schema.ResourceData, cluster types.ManagedObjectReference) (*vsantypes.VSANVcConvertToStretchedCluster, error) {
+	log.Printf("[DEBUG] building vsan 
stretched cluster request...") + conf := d.Get("vsan_stretched_cluster").([]interface{})[0].(map[string]interface{}) + + hostSet := map[interface{}]bool{} + hostCount := 0 + for _, host := range conf["preferred_fault_domain_host_ids"].(*schema.Set).List() { + hostSet[host] = true + hostCount++ + } + for _, host := range conf["secondary_fault_domain_host_ids"].(*schema.Set).List() { + hostSet[host] = true + hostCount++ + } + if len(hostSet) != hostCount { + return nil, fmt.Errorf("duplicate hostId appears in preferred fault domain host ids and secondary fault domain host ids") + } + + witness := structure.SliceStringsToManagedObjectReferences([]string{conf["witness_node"].(string)}, "HostSystem") + + faultDomainConfig := vsantypes.VimClusterVSANStretchedClusterFaultDomainConfig{ + FirstFdName: conf["preferred_fault_domain_name"].(string), + FirstFdHosts: structure.SliceInterfacesToManagedObjectReferences(conf["preferred_fault_domain_host_ids"].(*schema.Set).List(), "HostSystem"), + SecondFdName: conf["secondary_fault_domain_name"].(string), + SecondFdHosts: structure.SliceInterfacesToManagedObjectReferences(conf["secondary_fault_domain_host_ids"].(*schema.Set).List(), "HostSystem"), + } + + // TODO: make diskmapping configurable. + return &vsantypes.VSANVcConvertToStretchedCluster{ + This: vsan.VsanVcStretchedClusterSystem, + Cluster: cluster.Reference(), + FaultDomainConfig: faultDomainConfig, + WitnessHost: witness[0], + PreferredFd: conf["preferred_fault_domain_name"].(string), + }, nil +} + +func buildVsanRemoveWitnessHostReq(d *schema.ResourceData, cluster types.ManagedObjectReference, client *vsan.Client) (*vsantypes.VSANVcRemoveWitnessHost, error) { + log.Printf("[DEBUG] building vsan remove witness request...") + + res, err := vsanclient.GetWitnessHosts(client, cluster.Reference()) + if err != nil { + return nil, fmt.Errorf("failed to get witness_node when removing witness!") + } + + return &vsantypes.VSANVcRemoveWitnessHost{ + This: vsan.VsanVcStretchedClusterSystem, + Cluster: cluster.Reference(), + WitnessHost: res.Returnval[0].Host, + WitnessAddress: res.Returnval[0].UnicastAgentAddr, + }, nil +} + func resourceVSphereComputeClusterApplyVsanConfig(d *schema.ResourceData, meta interface{}, cluster *object.ClusterComputeResource) error { client, err := resourceVSphereComputeClusterClient(meta) if err != nil { @@ -1661,6 +1754,38 @@ func resourceVSphereComputeClusterApplyVsanConfig(d *schema.ResourceData, meta i return fmt.Errorf("cannot apply vsan remote datastores on cluster '%s': %s", d.Get("name").(string), err) } + // handle stretched cluster + if d.HasChange("vsan_stretched_cluster") { + _, n := d.GetChange("vsan_stretched_cluster") + // build or reconfigure stretched cluster + if len(n.([]interface{})) > 0 && n.([]interface{})[0].(map[string]interface{})["witness_node"].(string) != "" { + req, err := buildVsanStretchedClusterReq(d, cluster.Reference()) + if err != nil { + return err + } + + if err := vsanclient.ConvertToStretchedCluster(meta.(*Client).vsanClient, meta.(*Client).vimClient, *req); err != nil { + return fmt.Errorf("cannot stretch cluster %s with spec: %#v\n, err: %#v", d.Get("name").(string), *req, err) + } else { + log.Printf("[DEBUG] stretching cluster %s with spec: %#v", d.Get("name").(string), *req) + } + } + + // disable stretched cluster + if len(n.([]interface{})) == 0 || n.([]interface{})[0].(map[string]interface{})["witness_node"].(string) == "" { + req, err := buildVsanRemoveWitnessHostReq(d, cluster.Reference(), meta.(*Client).vsanClient) + if err != 
nil { + return err + } + + if err := vsanclient.RemoveWitnessHost(meta.(*Client).vsanClient, meta.(*Client).vimClient, *req); err != nil { + return fmt.Errorf("cannot disable stretched cluster %s with spec: %#v", d.Get("name").(string), *req) + } else { + log.Printf("[DEBUG] disabling stretched cluster %s with spec: %#v", d.Get("name").(string), *req) + } + } + } + return nil } @@ -1844,6 +1969,49 @@ func flattenVsanDisks(d *schema.ResourceData, cluster *object.ClusterComputeReso return d.Set("vsan_disk_group", diskMap) } +func flattenVsanStretchedCluster(client *vsan.Client, d *schema.ResourceData, cluster *object.ClusterComputeResource, obj *types.ClusterConfigInfoEx) error { + res, err := vsanclient.GetWitnessHosts(client, cluster.Reference()) + if err != nil { + return err + } + + if res.Returnval == nil { + return d.Set("vsan_stretched_cluster", []interface{}{}) + } + + if res.Returnval[0].UnicastAgentAddr != "" { + var conf []interface{} + + for _, witnessHost := range res.Returnval { + preferredFaultDomainName := witnessHost.PreferredFdName + var secondaryFaultDomainName string + var preferredFaultDomainHostIds []string + var secondaryFaultDomainHostIds []string + for _, hostConf := range obj.VsanHostConfig { + name := hostConf.FaultDomainInfo.Name + if name == preferredFaultDomainName { + preferredFaultDomainHostIds = append(preferredFaultDomainHostIds, hostConf.HostSystem.Value) + } else { + if secondaryFaultDomainName == "" { + secondaryFaultDomainName = name + } + secondaryFaultDomainHostIds = append(secondaryFaultDomainHostIds, hostConf.HostSystem.Value) + } + } + conf = append(conf, map[string]interface{}{ + "preferred_fault_domain_host_ids": preferredFaultDomainHostIds, + "secondary_fault_domain_host_ids": secondaryFaultDomainHostIds, + "witness_node": witnessHost.Host.Value, + "preferred_fault_domain_name": preferredFaultDomainName, + "secondary_fault_domain_name": secondaryFaultDomainName, + }) + } + return d.Set("vsan_stretched_cluster", conf) + } else { + return fmt.Errorf("error getting witness node for cluster %s, agent address was unexpectedly empty", d.Get("name").(string)) + } +} + // flattenClusterConfigSpecEx saves a ClusterConfigSpecEx into the supplied // ResourceData. 
func flattenClusterConfigSpecEx(d *schema.ResourceData, obj *types.ClusterConfigInfoEx, version viapi.VSphereVersion) error { diff --git a/vsphere/resource_vsphere_compute_cluster_test.go b/vsphere/resource_vsphere_compute_cluster_test.go index 8740ff4dd..07541548d 100644 --- a/vsphere/resource_vsphere_compute_cluster_test.go +++ b/vsphere/resource_vsphere_compute_cluster_test.go @@ -373,6 +373,50 @@ func TestAccResourceVSphereComputeCluster_faultDomain(t *testing.T) { }) } +func TestAccResourceVSphereComputeCluster_vsanStretchedCluster(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + RunSweepers() + testAccPreCheck(t) + testAccResourceVSphereComputeClusterPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereComputeClusterCheckExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereComputeClusterStretchedClusterEnabled(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereComputeClusterCheckExists(true), + resource.TestCheckTypeSetElemAttrPair( + "vsphere_compute_cluster.compute_cluster", + "vsan_stretched_cluster.*.preferred_fault_domain_host_ids.*", + "data.vsphere_host.roothost1", + "id", + ), + resource.TestCheckTypeSetElemAttrPair( + "vsphere_compute_cluster.compute_cluster", + "vsan_stretched_cluster.*.secondary_fault_domain_host_ids.*", + "data.vsphere_host.roothost2", + "id", + ), + resource.TestCheckTypeSetElemAttrPair( + "vsphere_compute_cluster.compute_cluster", + "vsan_stretched_cluster.*.witness_node", + "data.vsphere_host.roothost3", + "id", + ), + ), + }, + { + Config: testAccResourceVSphereComputeClusterStretchedClusterDisabled(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereComputeClusterCheckExists(true), + ), + }, + }, + }) +} + func TestAccResourceVSphereComputeCluster_explicitFailoverHost(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -1118,6 +1162,57 @@ resource "vsphere_compute_cluster" "compute_cluster" { ) } +func testAccResourceVSphereComputeClusterStretchedClusterEnabled() string { + return fmt.Sprintf(` +%s + +resource "vsphere_compute_cluster" "compute_cluster" { + name = "testacc-compute-cluster" + datacenter_id = data.vsphere_datacenter.rootdc1.id + host_system_ids = [data.vsphere_host.roothost1.id, data.vsphere_host.roothost2.id] + + vsan_enabled = true + vsan_stretched_cluster { + preferred_fault_domain_host_ids = [data.vsphere_host.roothost1.id] + secondary_fault_domain_host_ids = [data.vsphere_host.roothost2.id] + witness_node = data.vsphere_host.roothost3.id + } + force_evacuate_on_destroy = true +} + +`, + testhelper.CombineConfigs( + testhelper.ConfigDataRootDC1(), + testhelper.ConfigDataRootHost1(), + testhelper.ConfigDataRootHost2(), + testhelper.ConfigDataRootHost3(), + ), + ) +} + +func testAccResourceVSphereComputeClusterStretchedClusterDisabled() string { + return fmt.Sprintf(` +%s + +resource "vsphere_compute_cluster" "compute_cluster" { + name = "testacc-compute-cluster" + datacenter_id = data.vsphere_datacenter.rootdc1.id + host_system_ids = [data.vsphere_host.roothost1.id, data.vsphere_host.roothost2.id] + + vsan_enabled = true + force_evacuate_on_destroy = true +} + +`, + testhelper.CombineConfigs( + testhelper.ConfigDataRootDC1(), + testhelper.ConfigDataRootHost1(), + testhelper.ConfigDataRootHost2(), + testhelper.ConfigDataRootHost3(), + ), + ) +} + func testAccResourceVSphereComputeClusterConfigBasic() string { return fmt.Sprintf(` %s diff --git a/vsphere/resource_vsphere_virtual_machine.go 
b/vsphere/resource_vsphere_virtual_machine.go
index 99daae615..9c5671c57 100644
--- a/vsphere/resource_vsphere_virtual_machine.go
+++ b/vsphere/resource_vsphere_virtual_machine.go
@@ -1362,7 +1362,7 @@ func resourceVsphereMachineDeployOvfAndOva(d *schema.ResourceData, meta interfac
 	}
 
 	log.Print(" [DEBUG] start deploying from ovf/ova Template")
-	err = ovfHelper.DeployOvf(ovfImportspec)
+	err = ovfHelper.DeployOvf(client, ovfImportspec)
 	if err != nil {
 		return nil, fmt.Errorf("error while importing ovf/ova template, %s", err)
 	}
diff --git a/website/docs/r/compute_cluster.html.markdown b/website/docs/r/compute_cluster.html.markdown
index 51f855dba..81093db07 100644
--- a/website/docs/r/compute_cluster.html.markdown
+++ b/website/docs/r/compute_cluster.html.markdown
@@ -492,9 +492,15 @@ details, see the referenced link in the above paragraph.
   * `cache` - The canonical name of the disk to use for vSAN cache.
   * `storage` - An array of disk canonical names for vSAN storage.
 * `vsan_fault_domains` - (Optional) Configurations of vSAN fault domains.
-  * `fault_domain` - (Optional) The configuration for single fault domain.
+  * `fault_domain` - The configuration for a single fault domain.
     * `name` - The name of fault domain.
     * `host_ids` - The managed object IDs of the hosts to put in the fault domain.
+* `vsan_stretched_cluster` - (Optional) The configuration for the vSAN stretched cluster.
+  * `preferred_fault_domain_host_ids` - The managed object IDs of the hosts to put in the first fault domain.
+  * `secondary_fault_domain_host_ids` - The managed object IDs of the hosts to put in the second fault domain.
+  * `witness_node` - The managed object ID of the host selected as the witness node when enabling a stretched cluster.
+  * `preferred_fault_domain_name` - (Optional) The name of the first fault domain. Default is `Preferred`.
+  * `secondary_fault_domain_name` - (Optional) The name of the second fault domain. Default is `Secondary`.
 
 ~> **NOTE:** You must disable vSphere HA before you enable vSAN on the cluster.
 You can enable or re-enable vSphere HA after vSAN is configured.
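For reference, a minimal usage sketch of the new `vsan_stretched_cluster` block, adapted from the acceptance-test configuration added in this diff. The datacenter, host data sources, and cluster name are illustrative placeholders rather than part of the change.

```hcl
# Hypothetical datacenter and host lookups; substitute names from your own environment.
data "vsphere_datacenter" "datacenter" {
  name = "dc-01"
}

data "vsphere_host" "fault_domain_1_host" {
  name          = "esxi-01.example.com"
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

data "vsphere_host" "fault_domain_2_host" {
  name          = "esxi-02.example.com"
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

data "vsphere_host" "witness_host" {
  name          = "esxi-03.example.com"
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

resource "vsphere_compute_cluster" "compute_cluster" {
  name          = "example-compute-cluster"
  datacenter_id = data.vsphere_datacenter.datacenter.id
  host_system_ids = [
    data.vsphere_host.fault_domain_1_host.id,
    data.vsphere_host.fault_domain_2_host.id,
  ]

  # vSAN is enabled in the acceptance test before the cluster is stretched.
  vsan_enabled = true

  vsan_stretched_cluster {
    preferred_fault_domain_host_ids = [data.vsphere_host.fault_domain_1_host.id]
    secondary_fault_domain_host_ids = [data.vsphere_host.fault_domain_2_host.id]
    witness_node                    = data.vsphere_host.witness_host.id
  }
}
```

Removing the `vsan_stretched_cluster` block (or leaving `witness_node` empty) follows the disable path in `resourceVSphereComputeClusterApplyVsanConfig`, which removes the witness host and thereby disables the stretched configuration.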