From 5e66f7bbf241086a9d6c7f86fcc7dff929e72ad9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Pra=C5=BCak?= Date: Fri, 20 Oct 2023 20:10:50 +0200 Subject: [PATCH 1/6] fix: support asterisk URL in import (#1813) - use the correct URL parsing function from govmomi library to replace asterisk with a host when needed Refs: #1806 Co-authored-by: Ryan Johnson --- CHANGELOG.md | 4 ++ .../internal/helper/ovfdeploy/ovf_helper.go | 49 ++++++++++--------- vsphere/resource_vsphere_virtual_machine.go | 2 +- 3 files changed, 30 insertions(+), 25 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 978d1055b..4f1878543 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ ## 2.6.0 (Unreleased) +BUG FIXES: + +* `resource/virtual_machine`: Resolves upload error when deploying an OVF/OVA directly to an ESXi host. ([#1813](https://github.com/terraform-providers/terraform-provider-vsphere/pull/1813)) + FEATURES: * `resource/compute_cluster`: Adds support for vSAN Express Storage Architecture in vSphere 8.0. ([#1874](https://github.com/terraform-providers/terraform-provider-vsphere/pull/1874)) diff --git a/vsphere/internal/helper/ovfdeploy/ovf_helper.go b/vsphere/internal/helper/ovfdeploy/ovf_helper.go index 719455a77..b6f78f923 100644 --- a/vsphere/internal/helper/ovfdeploy/ovf_helper.go +++ b/vsphere/internal/helper/ovfdeploy/ovf_helper.go @@ -49,7 +49,7 @@ func (pr *ProgressReader) Read(p []byte) (n int, err error) { return } -func DeployOvfAndGetResult(ovfCreateImportSpecResult *types.OvfCreateImportSpecResult, resourcePoolObj *object.ResourcePool, +func DeployOvfAndGetResult(client *govmomi.Client, ovfCreateImportSpecResult *types.OvfCreateImportSpecResult, resourcePoolObj *object.ResourcePool, folder *object.Folder, host *object.HostSystem, filePath string, deployOva bool, fromLocal bool, allowUnverifiedSSL bool) error { var currBytesRead int64 @@ -100,15 +100,15 @@ func DeployOvfAndGetResult(ovfCreateImportSpecResult *types.OvfCreateImportSpecR } if !deployOva { if fromLocal { - err = uploadDisksFromLocal(filePath, ovfFileItem, deviceObj, &currBytesRead) + err = uploadDisksFromLocal(client, filePath, ovfFileItem, deviceObj, &currBytesRead) } else { - err = uploadDisksFromURL(filePath, ovfFileItem, deviceObj, &currBytesRead, allowUnverifiedSSL) + err = uploadDisksFromURL(client, filePath, ovfFileItem, deviceObj, &currBytesRead, allowUnverifiedSSL) } } else { if fromLocal { - err = uploadOvaDisksFromLocal(filePath, ovfFileItem, deviceObj, &currBytesRead) + err = uploadOvaDisksFromLocal(client, filePath, ovfFileItem, deviceObj, &currBytesRead) } else { - err = uploadOvaDisksFromURL(filePath, ovfFileItem, deviceObj, &currBytesRead, allowUnverifiedSSL) + err = uploadOvaDisksFromURL(client, filePath, ovfFileItem, deviceObj, &currBytesRead, allowUnverifiedSSL) } } if err != nil { @@ -125,12 +125,13 @@ func DeployOvfAndGetResult(ovfCreateImportSpecResult *types.OvfCreateImportSpecR return nfcLease.Complete(context.Background()) } -func upload(ctx context.Context, item types.OvfFileItem, f io.Reader, url string, size int64, totalBytesRead *int64) error { - u, err := soap.ParseURL(url) +func upload(ctx context.Context, client *govmomi.Client, item types.OvfFileItem, f io.Reader, rawUrl string, size int64, totalBytesRead *int64) error { + u, err := client.Client.ParseURL(rawUrl) if err != nil { return err } - c := soap.NewClient(u, true) + url := u.String() + c := client.Client.Client param := soap.Upload{ ContentLength: size, @@ -185,7 +186,7 @@ func upload(ctx context.Context, item 
types.OvfFileItem, f io.Reader, url string return err } -func uploadDisksFromLocal(filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { +func uploadDisksFromLocal(client *govmomi.Client, filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { absoluteFilePath := "" if strings.Contains(filePath, string(os.PathSeparator)) { absoluteFilePath = string(filePath[0 : strings.LastIndex(filePath, string(os.PathSeparator))+1]) @@ -196,7 +197,7 @@ func uploadDisksFromLocal(filePath string, ovfFileItem types.OvfFileItem, device if err != nil { return err } - err = upload(context.Background(), ovfFileItem, file, deviceObj.Url, ovfFileItem.Size, currBytesRead) + err = upload(context.Background(), client, ovfFileItem, file, deviceObj.Url, ovfFileItem.Size, currBytesRead) if err != nil { return fmt.Errorf("error while uploading the file %s %s", vmdkFilePath, err) } @@ -207,15 +208,15 @@ func uploadDisksFromLocal(filePath string, ovfFileItem types.OvfFileItem, device return nil } -func uploadDisksFromURL(filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64, +func uploadDisksFromURL(client *govmomi.Client, filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64, allowUnverifiedSSL bool) error { absoluteFilePath := "" if strings.Contains(filePath, "/") { absoluteFilePath = string(filePath[0 : strings.LastIndex(filePath, "/")+1]) } vmdkFilePath := absoluteFilePath + ovfFileItem.Path - client := getClient(allowUnverifiedSSL) - resp, err := client.Get(vmdkFilePath) + httpClient := getClient(allowUnverifiedSSL) + resp, err := httpClient.Get(vmdkFilePath) log.Print(" [DEBUG] Absolute vmdk path: " + vmdkFilePath) if err != nil { return err @@ -223,11 +224,11 @@ func uploadDisksFromURL(filePath string, ovfFileItem types.OvfFileItem, deviceOb defer func(Body io.ReadCloser) { _ = Body.Close() }(resp.Body) - err = upload(context.Background(), ovfFileItem, resp.Body, deviceObj.Url, ovfFileItem.Size, currBytesRead) + err = upload(context.Background(), client, ovfFileItem, resp.Body, deviceObj.Url, ovfFileItem.Size, currBytesRead) return err } -func uploadOvaDisksFromLocal(filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { +func uploadOvaDisksFromLocal(client *govmomi.Client, filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { diskName := ovfFileItem.Path ovaFile, err := os.Open(filePath) if err != nil { @@ -237,15 +238,15 @@ func uploadOvaDisksFromLocal(filePath string, ovfFileItem types.OvfFileItem, dev _ = ovaFile.Close() }(ovaFile) - err = findAndUploadDiskFromOva(ovaFile, diskName, ovfFileItem, deviceObj, currBytesRead) + err = findAndUploadDiskFromOva(client, ovaFile, diskName, ovfFileItem, deviceObj, currBytesRead) return err } -func uploadOvaDisksFromURL(filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64, +func uploadOvaDisksFromURL(client *govmomi.Client, filePath string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64, allowUnverifiedSSL bool) error { diskName := ovfFileItem.Path - client := getClient(allowUnverifiedSSL) - resp, err := client.Get(filePath) + httpClient := getClient(allowUnverifiedSSL) + resp, err := httpClient.Get(filePath) if err != nil { 
return err } @@ -253,7 +254,7 @@ func uploadOvaDisksFromURL(filePath string, ovfFileItem types.OvfFileItem, devic _ = Body.Close() }(resp.Body) if resp.StatusCode == http.StatusOK { - err = findAndUploadDiskFromOva(resp.Body, diskName, ovfFileItem, deviceObj, currBytesRead) + err = findAndUploadDiskFromOva(client, resp.Body, diskName, ovfFileItem, deviceObj, currBytesRead) if err != nil { return err } @@ -345,7 +346,7 @@ func getOvfDescriptorFromOva(ovaFile io.Reader) (string, error) { return "", fmt.Errorf("ovf file not found inside the ova") } -func findAndUploadDiskFromOva(ovaFile io.Reader, diskName string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { +func findAndUploadDiskFromOva(client *govmomi.Client, ovaFile io.Reader, diskName string, ovfFileItem types.OvfFileItem, deviceObj types.HttpNfcLeaseDeviceUrl, currBytesRead *int64) error { ovaReader := tar.NewReader(ovaFile) for { fileHdr, err := ovaReader.Next() @@ -356,7 +357,7 @@ func findAndUploadDiskFromOva(ovaFile io.Reader, diskName string, ovfFileItem ty return err } if fileHdr.Name == diskName { - err = upload(context.Background(), ovfFileItem, ovaReader, deviceObj.Url, ovfFileItem.Size, currBytesRead) + err = upload(context.Background(), client, ovfFileItem, ovaReader, deviceObj.Url, ovfFileItem.Size, currBytesRead) if err != nil { return fmt.Errorf("error while uploading the file %s %s", diskName, err) } @@ -557,7 +558,7 @@ func (o *OvfHelper) GetImportSpec(client *govmomi.Client) (*types.OvfCreateImpor return is, nil } -func (o *OvfHelper) DeployOvf(spec *types.OvfCreateImportSpecResult) error { - return DeployOvfAndGetResult(spec, o.ResourcePool, o.Folder, o.HostSystem, +func (o *OvfHelper) DeployOvf(client *govmomi.Client, spec *types.OvfCreateImportSpecResult) error { + return DeployOvfAndGetResult(client, spec, o.ResourcePool, o.Folder, o.HostSystem, o.FilePath, o.DeployOva, o.IsLocal, o.AllowUnverifiedSSL) } diff --git a/vsphere/resource_vsphere_virtual_machine.go b/vsphere/resource_vsphere_virtual_machine.go index 99daae615..9c5671c57 100644 --- a/vsphere/resource_vsphere_virtual_machine.go +++ b/vsphere/resource_vsphere_virtual_machine.go @@ -1362,7 +1362,7 @@ func resourceVsphereMachineDeployOvfAndOva(d *schema.ResourceData, meta interfac } log.Print(" [DEBUG] start deploying from ovf/ova Template") - err = ovfHelper.DeployOvf(ovfImportspec) + err = ovfHelper.DeployOvf(client, ovfImportspec) if err != nil { return nil, fmt.Errorf("error while importing ovf/ova template, %s", err) } From 9b4a81dbeb0bb1179983e3f39ad5632f4ef7a470 Mon Sep 17 00:00:00 2001 From: Ryan Johnson Date: Wed, 1 Nov 2023 14:15:19 -0400 Subject: [PATCH 2/6] chore(deps): bump google.golang.org/grpc from 1.57.0 to 1.57.1 (#2052) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 92aab9217..707a28e74 100644 --- a/go.mod +++ b/go.mod @@ -55,6 +55,6 @@ require ( golang.org/x/text v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/grpc v1.57.0 // indirect + google.golang.org/grpc v1.57.1 // indirect google.golang.org/protobuf v1.31.0 // indirect ) diff --git a/go.sum b/go.sum index 0e1b0ca8a..430339f80 100644 --- a/go.sum +++ b/go.sum @@ -195,8 +195,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 google.golang.org/appengine v1.6.7/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.57.1 h1:upNTNqv0ES+2ZOOqACwVtS3Il8M12/+Hz41RCPzAjQg= +google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= From 3b037a2cdbdce915723f99da17beb4cef3bcd052 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Nov 2023 12:05:29 -0500 Subject: [PATCH 3/6] chore(deps): bump peter-evans/create-or-update-comment (#2043) Bumps [peter-evans/create-or-update-comment](https://github.com/peter-evans/create-or-update-comment) from 3.0.2 to 3.1.0. - [Release notes](https://github.com/peter-evans/create-or-update-comment/releases) - [Commits](https://github.com/peter-evans/create-or-update-comment/compare/c6c9a1a66007646a28c153e2a8580a5bad27bcfa...23ff15729ef2fc348714a3bb66d2f655ca9066f2) --- updated-dependencies: - dependency-name: peter-evans/create-or-update-comment dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/issue_greeting.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/issue_greeting.yml b/.github/workflows/issue_greeting.yml index d6cfae06d..851f12487 100644 --- a/.github/workflows/issue_greeting.yml +++ b/.github/workflows/issue_greeting.yml @@ -22,7 +22,7 @@ jobs: vars: | author: ${{ github.actor }} - name: Create Comment - uses: peter-evans/create-or-update-comment@c6c9a1a66007646a28c153e2a8580a5bad27bcfa # v3.0.2 + uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0 with: issue-number: '${{ github.event.issue.number }}' body: '${{ steps.template.outputs.result }}' From a79f2f60ce9aaa8422a26bf74dce73541fc76ab8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Nov 2023 12:05:52 -0500 Subject: [PATCH 4/6] chore(deps): bump actions/setup-node from 3.8.1 to 4.0.0 (#2045) Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3.8.1 to 4.0.0. - [Release notes](https://github.com/actions/setup-node/releases) - [Commits](https://github.com/actions/setup-node/compare/5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d...8f152de45cc393bb48ce5d89d36b731f54556e65) --- updated-dependencies: - dependency-name: actions/setup-node dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/acceptance-tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/acceptance-tests.yaml b/.github/workflows/acceptance-tests.yaml index bf19523ee..cf2f6b3e5 100644 --- a/.github/workflows/acceptance-tests.yaml +++ b/.github/workflows/acceptance-tests.yaml @@ -63,7 +63,7 @@ jobs: go-version-file: '.go-version' - name: Set up Node - uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1 + uses: actions/setup-node@8f152de45cc393bb48ce5d89d36b731f54556e65 # v4.0.0 with: node-version: 18 From 8137f498e04f95f7e40274a974ea204eef872ee5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Nov 2023 12:06:05 -0500 Subject: [PATCH 5/6] chore(deps): bump hashicorp/setup-terraform from 2.0.3 to 3.0.0 (#2051) Bumps [hashicorp/setup-terraform](https://github.com/hashicorp/setup-terraform) from 2.0.3 to 3.0.0. - [Release notes](https://github.com/hashicorp/setup-terraform/releases) - [Changelog](https://github.com/hashicorp/setup-terraform/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/setup-terraform/compare/633666f66e0061ca3b725c73b2ec20cd13a8fdd1...a1502cd9e758c50496cc9ac5308c4843bcd56d36) --- updated-dependencies: - dependency-name: hashicorp/setup-terraform dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/acceptance-tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/acceptance-tests.yaml b/.github/workflows/acceptance-tests.yaml index cf2f6b3e5..063a6d545 100644 --- a/.github/workflows/acceptance-tests.yaml +++ b/.github/workflows/acceptance-tests.yaml @@ -42,7 +42,7 @@ jobs: echo "VSPHERE_REST_SESSION_PATH=$(pwd)/rest_sessions" >> $GITHUB_ENV - name: Set up Terraform - uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 # v2.0.3 + uses: hashicorp/setup-terraform@a1502cd9e758c50496cc9ac5308c4843bcd56d36 # v3.0.0 with: terraform_wrapper: false From c644fcdeb6fefde300a614870fabb2ee33494edf Mon Sep 17 00:00:00 2001 From: Xinyu Zhang <109590542+zxinyu08@users.noreply.github.com> Date: Fri, 10 Nov 2023 01:27:01 +0800 Subject: [PATCH 6/6] feat: add vsan stretched cluster support (#1885) Adds support for vSAN stretched clusters. --- CHANGELOG.md | 1 + .../helper/vsanclient/vsan_client_helper.go | 49 +++++ vsphere/resource_vsphere_compute_cluster.go | 172 ++++++++++++++++++ .../resource_vsphere_compute_cluster_test.go | 95 ++++++++++ website/docs/r/compute_cluster.html.markdown | 6 + 5 files changed, 323 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f1878543..2dfd8923f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ BUG FIXES: FEATURES: * `resource/compute_cluster`: Adds support for vSAN Express Storage Architecture in vSphere 8.0. ([#1874](https://github.com/terraform-providers/terraform-provider-vsphere/pull/1874)) +* `resource/compute_cluster`: Adds support for vSAN stretched clusters. 
([#1885](https://github.com/hashicorp/terraform-provider-vsphere/pull/1885/)) ## 2.5.1 (October 12, 2023) diff --git a/vsphere/internal/helper/vsanclient/vsan_client_helper.go b/vsphere/internal/helper/vsanclient/vsan_client_helper.go index a2f792e54..3903d21c3 100644 --- a/vsphere/internal/helper/vsanclient/vsan_client_helper.go +++ b/vsphere/internal/helper/vsanclient/vsan_client_helper.go @@ -7,8 +7,11 @@ import ( "context" "github.com/hashicorp/terraform-provider-vsphere/vsphere/internal/helper/provider" + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/object" vimtypes "github.com/vmware/govmomi/vim25/types" "github.com/vmware/govmomi/vsan" + "github.com/vmware/govmomi/vsan/methods" vsantypes "github.com/vmware/govmomi/vsan/types" ) @@ -30,3 +33,49 @@ func GetVsanConfig(vsanClient *vsan.Client, cluster vimtypes.ManagedObjectRefere return vsanConfig, err } + +func ConvertToStretchedCluster(vsanClient *vsan.Client, client *govmomi.Client, req vsantypes.VSANVcConvertToStretchedCluster) error { + ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout) + defer cancel() + + res, err := methods.VSANVcConvertToStretchedCluster(ctx, vsanClient, &req) + + if err != nil { + return err + } + + task := object.NewTask(client.Client, res.Returnval) + return task.Wait(ctx) +} + +// removing the witness host automatically disables stretched cluster. +func RemoveWitnessHost(vsanClient *vsan.Client, client *govmomi.Client, req vsantypes.VSANVcRemoveWitnessHost) error { + ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout) + defer cancel() + + res, err := methods.VSANVcRemoveWitnessHost(ctx, vsanClient, &req) + + if err != nil { + return err + } + + task := object.NewTask(client.Client, res.Returnval) + return task.Wait(ctx) +} + +func GetWitnessHosts(vsanClient *vsan.Client, cluster vimtypes.ManagedObjectReference) (*vsantypes.VSANVcGetWitnessHostsResponse, error) { + ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout) + defer cancel() + + req := vsantypes.VSANVcGetWitnessHosts{ + This: vsan.VsanVcStretchedClusterSystem, + Cluster: cluster.Reference(), + } + + res, err := methods.VSANVcGetWitnessHosts(ctx, vsanClient, &req) + if err != nil { + return nil, err + } + + return res, err +} diff --git a/vsphere/resource_vsphere_compute_cluster.go b/vsphere/resource_vsphere_compute_cluster.go index a843ffa43..dd4829154 100644 --- a/vsphere/resource_vsphere_compute_cluster.go +++ b/vsphere/resource_vsphere_compute_cluster.go @@ -28,6 +28,7 @@ import ( "github.com/vmware/govmomi" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vsan" vsantypes "github.com/vmware/govmomi/vsan/types" ) @@ -584,6 +585,45 @@ func resourceVSphereComputeCluster() *schema.Resource { }, }, }, + "vsan_stretched_cluster": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "The configuration for stretched cluster.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "preferred_fault_domain_host_ids": { + Type: schema.TypeSet, + Required: true, + Description: "The managed object IDs of the hosts to put in the first fault domain.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "secondary_fault_domain_host_ids": { + Type: schema.TypeSet, + Required: true, + Description: "The managed object IDs of the hosts to put in the second fault domain.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "witness_node": { + Type: 
schema.TypeString,
+						Required:    true,
+						Description: "The managed object ID of the host selected as the witness node when enabling the stretched cluster.",
+					},
+					"preferred_fault_domain_name": {
+						Type:        schema.TypeString,
+						Optional:    true,
+						Description: "The name of the preferred fault domain.",
+						Default:     "Preferred",
+					},
+					"secondary_fault_domain_name": {
+						Type:        schema.TypeString,
+						Optional:    true,
+						Description: "The name of the secondary fault domain.",
+						Default:     "Secondary",
+					},
+				},
+			},
+		},
 			vSphereTagAttributeKey:    tagsSchema(),
 			customattribute.ConfigKey: customattribute.ConfigSchema(),
 		},
@@ -1403,6 +1443,10 @@ func resourceVSphereComputeClusterFlattenData(
 		return err
 	}
 
+	if err := flattenVsanStretchedCluster(meta.(*Client).vsanClient, d, cluster, props.ConfigurationEx.(*types.ClusterConfigInfoEx)); err != nil {
+		return err
+	}
+
 	return flattenClusterConfigSpecEx(d, props.ConfigurationEx.(*types.ClusterConfigInfoEx), version)
 }
 
@@ -1463,6 +1507,59 @@ func expandVsanDatastoreConfig(d *schema.ResourceData, meta interface{}) (*vsant
 	return conf, nil
 }
 
+func buildVsanStretchedClusterReq(d *schema.ResourceData, cluster types.ManagedObjectReference) (*vsantypes.VSANVcConvertToStretchedCluster, error) {
+	log.Printf("[DEBUG] building vsan stretched cluster request...")
+	conf := d.Get("vsan_stretched_cluster").([]interface{})[0].(map[string]interface{})
+
+	hostSet := map[interface{}]bool{}
+	hostCount := 0
+	for _, host := range conf["preferred_fault_domain_host_ids"].(*schema.Set).List() {
+		hostSet[host] = true
+		hostCount++
+	}
+	for _, host := range conf["secondary_fault_domain_host_ids"].(*schema.Set).List() {
+		hostSet[host] = true
+		hostCount++
+	}
+	if len(hostSet) != hostCount {
+		return nil, fmt.Errorf("duplicate hostId appears in preferred fault domain host ids and secondary fault domain host ids")
+	}
+
+	witness := structure.SliceStringsToManagedObjectReferences([]string{conf["witness_node"].(string)}, "HostSystem")
+
+	faultDomainConfig := vsantypes.VimClusterVSANStretchedClusterFaultDomainConfig{
+		FirstFdName:   conf["preferred_fault_domain_name"].(string),
+		FirstFdHosts:  structure.SliceInterfacesToManagedObjectReferences(conf["preferred_fault_domain_host_ids"].(*schema.Set).List(), "HostSystem"),
+		SecondFdName:  conf["secondary_fault_domain_name"].(string),
+		SecondFdHosts: structure.SliceInterfacesToManagedObjectReferences(conf["secondary_fault_domain_host_ids"].(*schema.Set).List(), "HostSystem"),
+	}
+
+	// TODO: make disk mapping configurable.
+ return &vsantypes.VSANVcConvertToStretchedCluster{ + This: vsan.VsanVcStretchedClusterSystem, + Cluster: cluster.Reference(), + FaultDomainConfig: faultDomainConfig, + WitnessHost: witness[0], + PreferredFd: conf["preferred_fault_domain_name"].(string), + }, nil +} + +func buildVsanRemoveWitnessHostReq(d *schema.ResourceData, cluster types.ManagedObjectReference, client *vsan.Client) (*vsantypes.VSANVcRemoveWitnessHost, error) { + log.Printf("[DEBUG] building vsan remove witness request...") + + res, err := vsanclient.GetWitnessHosts(client, cluster.Reference()) + if err != nil { + return nil, fmt.Errorf("failed to get witness_node when removing witness!") + } + + return &vsantypes.VSANVcRemoveWitnessHost{ + This: vsan.VsanVcStretchedClusterSystem, + Cluster: cluster.Reference(), + WitnessHost: res.Returnval[0].Host, + WitnessAddress: res.Returnval[0].UnicastAgentAddr, + }, nil +} + func resourceVSphereComputeClusterApplyVsanConfig(d *schema.ResourceData, meta interface{}, cluster *object.ClusterComputeResource) error { client, err := resourceVSphereComputeClusterClient(meta) if err != nil { @@ -1542,6 +1639,38 @@ func resourceVSphereComputeClusterApplyVsanConfig(d *schema.ResourceData, meta i return fmt.Errorf("cannot apply vsan remote datastores on cluster '%s': %s", d.Get("name").(string), err) } + // handle stretched cluster + if d.HasChange("vsan_stretched_cluster") { + _, n := d.GetChange("vsan_stretched_cluster") + // build or reconfigure stretched cluster + if len(n.([]interface{})) > 0 && n.([]interface{})[0].(map[string]interface{})["witness_node"].(string) != "" { + req, err := buildVsanStretchedClusterReq(d, cluster.Reference()) + if err != nil { + return err + } + + if err := vsanclient.ConvertToStretchedCluster(meta.(*Client).vsanClient, meta.(*Client).vimClient, *req); err != nil { + return fmt.Errorf("cannot stretch cluster %s with spec: %#v\n, err: %#v", d.Get("name").(string), *req, err) + } else { + log.Printf("[DEBUG] stretching cluster %s with spec: %#v", d.Get("name").(string), *req) + } + } + + // disable stretched cluster + if len(n.([]interface{})) == 0 || n.([]interface{})[0].(map[string]interface{})["witness_node"].(string) == "" { + req, err := buildVsanRemoveWitnessHostReq(d, cluster.Reference(), meta.(*Client).vsanClient) + if err != nil { + return err + } + + if err := vsanclient.RemoveWitnessHost(meta.(*Client).vsanClient, meta.(*Client).vimClient, *req); err != nil { + return fmt.Errorf("cannot disable stretched cluster %s with spec: %#v", d.Get("name").(string), *req) + } else { + log.Printf("[DEBUG] disabling stretched cluster %s with spec: %#v", d.Get("name").(string), *req) + } + } + } + return nil } @@ -1725,6 +1854,49 @@ func flattenVsanDisks(d *schema.ResourceData, cluster *object.ClusterComputeReso return d.Set("vsan_disk_group", diskMap) } +func flattenVsanStretchedCluster(client *vsan.Client, d *schema.ResourceData, cluster *object.ClusterComputeResource, obj *types.ClusterConfigInfoEx) error { + res, err := vsanclient.GetWitnessHosts(client, cluster.Reference()) + if err != nil { + return err + } + + if res.Returnval == nil { + return d.Set("vsan_stretched_cluster", []interface{}{}) + } + + if res.Returnval[0].UnicastAgentAddr != "" { + var conf []interface{} + + for _, witnessHost := range res.Returnval { + preferredFaultDomainName := witnessHost.PreferredFdName + var secondaryFaultDomainName string + var preferredFaultDomainHostIds []string + var secondaryFaultDomainHostIds []string + for _, hostConf := range obj.VsanHostConfig { + name := 
hostConf.FaultDomainInfo.Name + if name == preferredFaultDomainName { + preferredFaultDomainHostIds = append(preferredFaultDomainHostIds, hostConf.HostSystem.Value) + } else { + if secondaryFaultDomainName == "" { + secondaryFaultDomainName = name + } + secondaryFaultDomainHostIds = append(secondaryFaultDomainHostIds, hostConf.HostSystem.Value) + } + } + conf = append(conf, map[string]interface{}{ + "preferred_fault_domain_host_ids": preferredFaultDomainHostIds, + "secondary_fault_domain_host_ids": secondaryFaultDomainHostIds, + "witness_node": witnessHost.Host.Value, + "preferred_fault_domain_name": preferredFaultDomainName, + "secondary_fault_domain_name": secondaryFaultDomainName, + }) + } + return d.Set("vsan_stretched_cluster", conf) + } else { + return fmt.Errorf("error getting witness node for cluster %s, agent address was unexpectedly empty", d.Get("name").(string)) + } +} + // flattenClusterConfigSpecEx saves a ClusterConfigSpecEx into the supplied // ResourceData. func flattenClusterConfigSpecEx(d *schema.ResourceData, obj *types.ClusterConfigInfoEx, version viapi.VSphereVersion) error { diff --git a/vsphere/resource_vsphere_compute_cluster_test.go b/vsphere/resource_vsphere_compute_cluster_test.go index 2919d0fea..086d1e5b6 100644 --- a/vsphere/resource_vsphere_compute_cluster_test.go +++ b/vsphere/resource_vsphere_compute_cluster_test.go @@ -327,6 +327,50 @@ func TestAccResourceVSphereComputeCluster_vsanEsaEnabled(t *testing.T) { }) } +func TestAccResourceVSphereComputeCluster_vsanStretchedCluster(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + RunSweepers() + testAccPreCheck(t) + testAccResourceVSphereComputeClusterPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereComputeClusterCheckExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereComputeClusterStretchedClusterEnabled(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereComputeClusterCheckExists(true), + resource.TestCheckTypeSetElemAttrPair( + "vsphere_compute_cluster.compute_cluster", + "vsan_stretched_cluster.*.preferred_fault_domain_host_ids.*", + "data.vsphere_host.roothost1", + "id", + ), + resource.TestCheckTypeSetElemAttrPair( + "vsphere_compute_cluster.compute_cluster", + "vsan_stretched_cluster.*.secondary_fault_domain_host_ids.*", + "data.vsphere_host.roothost2", + "id", + ), + resource.TestCheckTypeSetElemAttrPair( + "vsphere_compute_cluster.compute_cluster", + "vsan_stretched_cluster.*.witness_node", + "data.vsphere_host.roothost3", + "id", + ), + ), + }, + { + Config: testAccResourceVSphereComputeClusterStretchedClusterDisabled(), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereComputeClusterCheckExists(true), + ), + }, + }, + }) +} + func TestAccResourceVSphereComputeCluster_explicitFailoverHost(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -1043,6 +1087,57 @@ resource "vsphere_compute_cluster" "compute_cluster" { ) } +func testAccResourceVSphereComputeClusterStretchedClusterEnabled() string { + return fmt.Sprintf(` +%s + +resource "vsphere_compute_cluster" "compute_cluster" { + name = "testacc-compute-cluster" + datacenter_id = data.vsphere_datacenter.rootdc1.id + host_system_ids = [data.vsphere_host.roothost1.id, data.vsphere_host.roothost2.id] + + vsan_enabled = true + vsan_stretched_cluster { + preferred_fault_domain_host_ids = [data.vsphere_host.roothost1.id] + secondary_fault_domain_host_ids = [data.vsphere_host.roothost2.id] + witness_node = 
data.vsphere_host.roothost3.id
+  }
+  force_evacuate_on_destroy = true
+}
+
+`,
+		testhelper.CombineConfigs(
+			testhelper.ConfigDataRootDC1(),
+			testhelper.ConfigDataRootHost1(),
+			testhelper.ConfigDataRootHost2(),
+			testhelper.ConfigDataRootHost3(),
+		),
+	)
+}
+
+func testAccResourceVSphereComputeClusterStretchedClusterDisabled() string {
+	return fmt.Sprintf(`
+%s
+
+resource "vsphere_compute_cluster" "compute_cluster" {
+  name            = "testacc-compute-cluster"
+  datacenter_id   = data.vsphere_datacenter.rootdc1.id
+  host_system_ids = [data.vsphere_host.roothost1.id, data.vsphere_host.roothost2.id]
+
+  vsan_enabled              = true
+  force_evacuate_on_destroy = true
+}
+
+`,
+		testhelper.CombineConfigs(
+			testhelper.ConfigDataRootDC1(),
+			testhelper.ConfigDataRootHost1(),
+			testhelper.ConfigDataRootHost2(),
+			testhelper.ConfigDataRootHost3(),
+		),
+	)
+}
+
 func testAccResourceVSphereComputeClusterConfigBasic() string {
 	return fmt.Sprintf(`
 %s
diff --git a/website/docs/r/compute_cluster.html.markdown b/website/docs/r/compute_cluster.html.markdown
index 71764a54f..378a1f709 100644
--- a/website/docs/r/compute_cluster.html.markdown
+++ b/website/docs/r/compute_cluster.html.markdown
@@ -491,6 +491,12 @@ details, see the referenced link in the above paragraph.
   group in the cluster.
 * `cache` - The canonical name of the disk to use for vSAN cache.
 * `storage` - An array of disk canonical names for vSAN storage.
+* `vsan_stretched_cluster` - (Optional) The configuration for a vSAN stretched cluster.
+  * `preferred_fault_domain_host_ids` - The managed object IDs of the hosts to put in the first fault domain.
+  * `secondary_fault_domain_host_ids` - The managed object IDs of the hosts to put in the second fault domain.
+  * `witness_node` - The managed object ID of the host selected as the witness node when enabling the stretched cluster.
+  * `preferred_fault_domain_name` - (Optional) The name of the first fault domain. Default is `Preferred`.
+  * `secondary_fault_domain_name` - (Optional) The name of the second fault domain. Default is `Secondary`.
 
 ~> **NOTE:** You must disable vSphere HA before you enable vSAN on the cluster.
 You can enable or re-enable vSphere HA after vSAN is configured.
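For reference, a minimal configuration using the new `vsan_stretched_cluster` block might look like the following sketch. It mirrors the acceptance-test configuration added in PATCH 6/6; the `data.vsphere_datacenter` and `data.vsphere_host` data sources are placeholders for a datacenter and hosts in your own environment.

```hcl
resource "vsphere_compute_cluster" "compute_cluster" {
  name            = "example-compute-cluster"
  datacenter_id   = data.vsphere_datacenter.datacenter.id
  host_system_ids = [data.vsphere_host.host1.id, data.vsphere_host.host2.id]

  vsan_enabled = true

  vsan_stretched_cluster {
    # Hosts placed in the first (preferred) and second fault domains.
    preferred_fault_domain_host_ids = [data.vsphere_host.host1.id]
    secondary_fault_domain_host_ids = [data.vsphere_host.host2.id]

    # Host used as the witness node; in the acceptance test above this is a
    # host that is not listed in host_system_ids.
    witness_node = data.vsphere_host.witness.id
  }
}
```

Removing the `vsan_stretched_cluster` block on a later apply follows the path shown in the `StretchedClusterDisabled` test configuration: the provider removes the witness host, which automatically disables the stretched-cluster setup.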
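On the PATCH 1/6 change: the NFC lease device URLs returned for a deployment that goes directly to an ESXi host can carry `*` as the host placeholder, which is why the helper now parses them through the session client instead of the package-level `soap.ParseURL`. The following is a minimal sketch of that idea, assuming an already-authenticated `*govmomi.Client`; the `resolveLeaseURL` helper and the example URL are illustrative and not part of the patch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/vmware/govmomi"
)

// resolveLeaseURL resolves an HttpNfcLeaseDeviceUrl.Url before uploading a disk.
// Parsing with the session's (*soap.Client).ParseURL substitutes the client's own
// host when the URL uses "*", which the package-level soap.ParseURL does not do.
func resolveLeaseURL(client *govmomi.Client, rawURL string) (string, error) {
	u, err := client.Client.ParseURL(rawURL) // replaces a "*" host with the connected endpoint's host
	if err != nil {
		return "", err
	}
	return u.String(), nil
}

func main() {
	// Illustration only: with a client connected to an ESXi host, a device URL such
	// as "https://*/nfc/.../disk-0.vmdk" resolves to that host's address.
	var client *govmomi.Client // assume this was obtained via govmomi.NewClient(...)
	if client == nil {
		log.Println("connect a client first; see govmomi.NewClient")
		return
	}
	resolved, err := resolveLeaseURL(client, "https://*/nfc/5256d/disk-0.vmdk")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved)
}
```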