From 9c0389066257f0e7a092f88ebc3b5a4629a13746 Mon Sep 17 00:00:00 2001 From: Sivaanand Murugesan Date: Tue, 19 Nov 2024 02:58:23 +0530 Subject: [PATCH] PLT-1443:Added Backup storage for other providers(Minio, gcp and azure). --- docs/resources/backup_storage_location.md | 37 ++- .../resource.tf | 40 +-- go.mod | 4 +- go.sum | 4 +- .../common_backup_storage_location.go | 311 +++++++++++++++++- .../resource_backup_storage_location.go | 55 +--- 6 files changed, 383 insertions(+), 68 deletions(-) diff --git a/docs/resources/backup_storage_location.md b/docs/resources/backup_storage_location.md index e9e1a77a..fd5d2c03 100644 --- a/docs/resources/backup_storage_location.md +++ b/docs/resources/backup_storage_location.md @@ -51,22 +51,49 @@ resource "spectrocloud_backup_storage_location" "bsl2" { ### Required -- `bucket_name` (String) The name of the storage bucket where backups are stored. This is relevant for S3 or S3-compatible storage services. -- `is_default` (Boolean) Specifies if this backup storage location should be used as the default location for storing backups. - `name` (String) The name of the backup storage location. This is a unique identifier for the backup location. -- `region` (String) The region where the backup storage is located, typically corresponding to the region of the cloud provider. -- `s3` (Block List, Min: 1, Max: 1) S3-specific settings for configuring the backup storage location. (see [below for nested schema](#nestedblock--s3)) ### Optional -- `ca_cert` (String) An optional CA certificate used for SSL connections to ensure secure communication with the storage provider. +- `azure_storage_config` (Block List, Max: 1) Azure storage settings for configuring the backup storage location. (see [below for nested schema](#nestedblock--azure_storage_config)) +- `bucket_name` (String) The name of the storage bucket where backups are stored. This is relevant for S3 or S3-compatible(minio) or gcp storage services. 
+- `ca_cert` (String) An optional CA certificate used for SSL connections to ensure secure communication with the storage provider. This is relevant for S3 or S3-compatible(minio) storage services.
 - `context` (String) The context of the backup storage location. Allowed values are `project` or `tenant`. Default value is `project`. If the `project` context is specified, the project name will sourced from the provider configuration parameter [`project_name`](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs#schema).
+- `gcp_storage_config` (Block List, Max: 1) GCP storage settings for configuring the backup storage location. (see [below for nested schema](#nestedblock--gcp_storage_config))
+- `is_default` (Boolean) Specifies if this backup storage location should be used as the default location for storing backups.
+- `location_provider` (String) The location provider for backup storage location. Allowed values are `aws` or `minio` or `gcp` or `azure`. Default value is `aws`.
+- `region` (String) The region where the backup storage is located, typically corresponding to the region of the cloud provider. This is relevant for S3 or S3-compatible(minio) storage services.
+- `s3` (Block List, Max: 1) S3-specific settings for configuring the backup storage location. (see [below for nested schema](#nestedblock--s3))
 - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))

 ### Read-Only

 - `id` (String) The ID of this resource.
+
+### Nested Schema for `azure_storage_config`
+
+Required:
+
+- `azure_client_id` (String) Unique client Id from Azure console.
+- `azure_client_secret` (String, Sensitive) Azure secret for authentication.
+- `azure_tenant_id` (String) Unique tenant Id from Azure console.
+- `container_name` (String) The container name.
+- `resource_group` (String) The resource group name.
+- `stock_keeping_unit` (String) The stock-keeping unit (SKU).
+- `storage_name` (String) The storage name.
+- `subscription_id` (String) Unique subscription Id from Azure console. + + + +### Nested Schema for `gcp_storage_config` + +Required: + +- `gcp_json_credentials` (String, Sensitive) The GCP credentials in JSON format. These credentials are required to authenticate and manage. +- `project_id` (String) The GCP project ID. + + ### Nested Schema for `s3` diff --git a/examples/resources/spectrocloud_backup_storage_location/resource.tf b/examples/resources/spectrocloud_backup_storage_location/resource.tf index dadcbdde..9299d826 100644 --- a/examples/resources/spectrocloud_backup_storage_location/resource.tf +++ b/examples/resources/spectrocloud_backup_storage_location/resource.tf @@ -1,28 +1,28 @@ resource "spectrocloud_backup_storage_location" "bsl1" { - name = "aaa-project-dev-1" - context = "project" + name = "aaa-project-dev-1" + context = "project" location_provider = "gcp" - is_default = false - region = "us-east-1" - bucket_name = "project-backup-2" -# s3 { -# credential_type = "secret" -# access_key = "access_key" -# secret_key = "secret_key" -# s3_force_path_style = false -# s3_url = "http://10.90.78.23" -# } + is_default = false + region = "us-east-1" + bucket_name = "project-backup-2" + # s3 { + # credential_type = "secret" + # access_key = "access_key" + # secret_key = "secret_key" + # s3_force_path_style = false + # s3_url = "http://10.90.78.23" + # } gcp_storage_config { - project_id = "test_id" + project_id = "test_id" gcp_json_credentials = "test test" } - azure_storage_config{ - container_name = "test-storage-container" - storage_name = "test-backup-storage" - stock_keeping_unit = "test" - resource_group = "test-group" - azure_tenant_id = "test-tenant-id" - azure_client_id = "test-client-id" + azure_storage_config { + container_name = "test-storage-container" + storage_name = "test-backup-storage" + stock_keeping_unit = "test" + resource_group = "test-group" + azure_tenant_id = "test-tenant-id" + azure_client_id = "test-client-id" azure_client_secret 
= "test-client-service" } } diff --git a/go.mod b/go.mod index 2fce15d5..6bb2224d 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/robfig/cron v1.2.0 github.com/spectrocloud/gomi v1.14.1-0.20240214074114-c19394812368 github.com/spectrocloud/hapi v1.14.1-0.20240214071352-81f589b1d86d - github.com/spectrocloud/palette-sdk-go v0.0.0-20241114040951-b4855be46579 + github.com/spectrocloud/palette-sdk-go v0.0.0-20241118202225-472a9f929ec3 github.com/stretchr/testify v1.9.0 gotest.tools v2.2.0+incompatible k8s.io/api v0.23.5 @@ -126,4 +126,4 @@ require ( sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) -replace github.com/spectrocloud/palette-sdk-go => ../palette-sdk-go +//replace github.com/spectrocloud/palette-sdk-go => ../palette-sdk-go diff --git a/go.sum b/go.sum index 775b0be9..81e1c5ce 100644 --- a/go.sum +++ b/go.sum @@ -600,8 +600,8 @@ github.com/spectrocloud/gomi v1.14.1-0.20240214074114-c19394812368 h1:eY0BOyEbGu github.com/spectrocloud/gomi v1.14.1-0.20240214074114-c19394812368/go.mod h1:LlZ9We4kDaELYi7Is0SVmnySuDhwphJLS6ZT4wXxFIk= github.com/spectrocloud/hapi v1.14.1-0.20240214071352-81f589b1d86d h1:OMRbHxMJ1a+G1BYzvUYuMM0wLkYJPdnEOFx16faQ/UY= github.com/spectrocloud/hapi v1.14.1-0.20240214071352-81f589b1d86d/go.mod h1:MktpRPnSXDTHsQrFSD+daJFQ1zMLSR+1gWOL31jVvWE= -github.com/spectrocloud/palette-sdk-go v0.0.0-20241114040951-b4855be46579 h1:C8daKBQJbK2DfoIEaHYNXTXaoSNasqMSVnKnc4Q3WyI= -github.com/spectrocloud/palette-sdk-go v0.0.0-20241114040951-b4855be46579/go.mod h1:dSlNvDS0qwUWTbrYI6P8x981mcbbRHFrBg67v5zl81U= +github.com/spectrocloud/palette-sdk-go v0.0.0-20241118202225-472a9f929ec3 h1:DUTKSzkaUuKPYe6biK+vuYpO3I9gbPLPXaoEv1FSvdM= +github.com/spectrocloud/palette-sdk-go v0.0.0-20241118202225-472a9f929ec3/go.mod h1:dSlNvDS0qwUWTbrYI6P8x981mcbbRHFrBg67v5zl81U= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= diff --git a/spectrocloud/common_backup_storage_location.go b/spectrocloud/common_backup_storage_location.go index be1447b6..3cf9ad5a 100644 --- a/spectrocloud/common_backup_storage_location.go +++ b/spectrocloud/common_backup_storage_location.go @@ -1,12 +1,38 @@ package spectrocloud import ( + "context" + "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spectrocloud/gomi/pkg/ptr" "github.com/spectrocloud/palette-sdk-go/api/models" "github.com/spectrocloud/palette-sdk-go/client" ) +func schemaValidationForLocationProvider(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + provider := d.Get("location_provider").(string) + if (provider == "aws" || provider == "minio") && (len(d.Get("s3").([]interface{})) == 0 || d.Get("bucket_name").(string) == "" || d.Get("region").(string) == "") { + return fmt.Errorf("`s3, bucket_name & region` is required when location provider set to 'aws' or 'minio'") + } + if (provider == "aws" || provider == "minio") && (len(d.Get("azure_storage_config").([]interface{})) != 0 || (len(d.Get("gcp_storage_config").([]interface{}))) != 0) { + return fmt.Errorf("`gcp_storage_config & azure_storage_config` are not allowed when location provider set to 'aws' or 'minio'") + } + if (provider == "gcp") && (len(d.Get("gcp_storage_config").([]interface{})) == 0 || d.Get("bucket_name").(string) == "") { + return fmt.Errorf("`gcp_storage_config & bucket_name` is required when location provider set to 'gcp'") + } + if (provider == "azure") && len(d.Get("azure_storage_config").([]interface{})) == 0 { + return fmt.Errorf("`azure_storage_config` is required when location provider set to 'azure'") + } + if provider == "azure" && (len(d.Get("s3").([]interface{})) != 0 || d.Get("bucket_name").(string) != "" || d.Get("region").(string) != "" || d.Get("ca_cert").(string) != 
"") { + return fmt.Errorf("`s3, bucket_name, region & ca_cert` are not allowed when location provider set to 'azure'") + } + if (provider == "gcp") && (len(d.Get("azure_storage_config").([]interface{})) != 0 || d.Get("region").(string) != "" || d.Get("ca_cert").(string) != "") { + return fmt.Errorf("`azure_storage_config, region, ca_cert` are not allowed when location provider set to 'gcp'") + } + return nil +} + func S3BackupStorageLocationCreate(d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { var diags diag.Diagnostics @@ -26,7 +52,35 @@ func MinioBackupStorageLocationCreate(d *schema.ResourceData, c *client.V1Client bsl := toMinioBackupStorageLocation(d) - uid, err := c.CreateS3BackupStorageLocation(bsl) + uid, err := c.CreateMinioBackupStorageLocation(bsl) + if err != nil { + return diag.FromErr(err) + } + d.SetId(uid) + + return diags +} + +func GcpBackupStorageLocationCreate(d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + var diags diag.Diagnostics + + bsl := toGcpBackupStorageLocation(d) + + uid, err := c.CreateGcpBackupStorageLocation(bsl) + if err != nil { + return diag.FromErr(err) + } + d.SetId(uid) + + return diags +} + +func AzureBackupStorageLocationCreate(d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + var diags diag.Diagnostics + + bsl := toAzureBackupStorageLocation(d) + + uid, err := c.CreateAzureBackupStorageLocation(bsl) if err != nil { return diag.FromErr(err) } @@ -103,6 +157,156 @@ func S3BackupStorageLocationRead(d *schema.ResourceData, c *client.V1Client) dia return diags } +func MinioBackupStorageLocationRead(d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + var diags diag.Diagnostics + + bsl, err := c.GetBackupStorageLocation(d.Id()) + if err != nil { + return diag.FromErr(err) + } else if bsl == nil { + // Deleted - Terraform will recreate it + d.SetId("") + return diags + } + + if err := d.Set("name", bsl.Metadata.Name); err != nil { + return diag.FromErr(err) + } + if err 
:= d.Set("is_default", bsl.Spec.IsDefault); err != nil { + return diag.FromErr(err) + } + + if bsl.Spec.Storage == "minio" { + s3Bsl, err := c.GetMinioBackupStorageLocation(d.Id()) + if err != nil { + return diag.FromErr(err) + } else if s3Bsl == nil { + // Deleted - Terraform will recreate it + d.SetId("") + return diags + } + if len(s3Bsl.Spec.Config.CaCert) > 0 { + if err := d.Set("ca_cert", s3Bsl.Spec.Config.CaCert); err != nil { + return diag.FromErr(err) + } + } + if err := d.Set("region", *s3Bsl.Spec.Config.Region); err != nil { + return diag.FromErr(err) + } + if err := d.Set("bucket_name", *s3Bsl.Spec.Config.BucketName); err != nil { + return diag.FromErr(err) + } + + s3 := make(map[string]interface{}) + if len(s3Bsl.Spec.Config.S3URL) > 0 { + s3["s3_url"] = s3Bsl.Spec.Config.S3URL + } + + if s3Bsl.Spec.Config.S3ForcePathStyle != nil { + s3["s3_force_path_style"] = *s3Bsl.Spec.Config.S3ForcePathStyle + } + // Minio only supports secret type credentials + s3["credential_type"] = string(s3Bsl.Spec.Config.Credentials.CredentialType) + if s3Bsl.Spec.Config.Credentials.CredentialType == models.V1AwsCloudAccountCredentialTypeSecret { + s3["access_key"] = s3Bsl.Spec.Config.Credentials.AccessKey + s3["secret_key"] = s3Bsl.Spec.Config.Credentials.SecretKey + } + s3Config := make([]interface{}, 0, 1) + s3Config = append(s3Config, s3) + if err := d.Set("s3", s3Config); err != nil { + return diag.FromErr(err) + } + } + + return diags +} + +func GcpBackupStorageLocationRead(d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + var diags diag.Diagnostics + + bsl, err := c.GetBackupStorageLocation(d.Id()) + if err != nil { + return diag.FromErr(err) + } else if bsl == nil { + // Deleted - Terraform will recreate it + d.SetId("") + return diags + } + if err := d.Set("name", bsl.Metadata.Name); err != nil { + return diag.FromErr(err) + } + if err := d.Set("is_default", bsl.Spec.IsDefault); err != nil { + return diag.FromErr(err) + } + if bsl.Spec.Storage == 
"gcp" { + gcpBsl, err := c.GetGCPBackupStorageLocation(d.Id()) + if err != nil { + return diag.FromErr(err) + } else if gcpBsl == nil { + // Deleted - Terraform will recreate it + d.SetId("") + return diags + } + if err := d.Set("bucket_name", *gcpBsl.Spec.Config.BucketName); err != nil { + return diag.FromErr(err) + } + gcpConfig := make([]interface{}, 0) + if err := d.Set("bucket_name", *gcpBsl.Spec.Config.BucketName); err != nil { + return diag.FromErr(err) + } + gcpConfig = append(gcpConfig, map[string]interface{}{ + "project_id": gcpBsl.Spec.Config.ProjectID, + "gcp_json_credentials": gcpBsl.Spec.Config.Credentials.JSONCredentials, + }) + if err := d.Set("gcp_storage_config", gcpConfig); err != nil { + return diag.FromErr(err) + } + } + return diags +} + +func AzureBackupStorageLocationRead(d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + var diags diag.Diagnostics + + bsl, err := c.GetBackupStorageLocation(d.Id()) + if err != nil { + return diag.FromErr(err) + } else if bsl == nil { + // Deleted - Terraform will recreate it + d.SetId("") + return diags + } + if err := d.Set("name", bsl.Metadata.Name); err != nil { + return diag.FromErr(err) + } + if err := d.Set("is_default", bsl.Spec.IsDefault); err != nil { + return diag.FromErr(err) + } + azureBsl, err := c.GetAzureBackupStorageLocation(d.Id()) + if err != nil { + return diag.FromErr(err) + } else if azureBsl == nil { + // Deleted - Terraform will recreate it + d.SetId("") + return diags + } + azConfig := make([]interface{}, 0) + azConfig = append(azConfig, map[string]interface{}{ + "container_name": azureBsl.Spec.Config.ContainerName, + "storage_name": azureBsl.Spec.Config.StorageName, + "stock_keeping_unit": azureBsl.Spec.Config.Sku, + "resource_group": azureBsl.Spec.Config.ResourceGroup, + "azure_tenant_id": azureBsl.Spec.Config.Credentials.TenantID, + "azure_client_id": azureBsl.Spec.Config.Credentials.ClientID, + "subscription_id": azureBsl.Spec.Config.Credentials.SubscriptionID, + 
"azure_client_secret": azureBsl.Spec.Config.Credentials.ClientSecret, + }) + if err := d.Set("azure_storage_config", azConfig); err != nil { + return diag.FromErr(err) + } + return diags +} + func S3BackupStorageLocationUpdate(d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { var diags diag.Diagnostics bsl := toS3BackupStorageLocation(d) @@ -113,6 +317,36 @@ func S3BackupStorageLocationUpdate(d *schema.ResourceData, c *client.V1Client) d return diags } +func MinioBackupStorageLocationUpdate(d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + var diags diag.Diagnostics + bsl := toMinioBackupStorageLocation(d) + err := c.UpdateMinioBackupStorageLocation(d.Id(), bsl) + if err != nil { + return diag.FromErr(err) + } + return diags +} + +func GcpBackupStorageLocationUpdate(d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + var diags diag.Diagnostics + bsl := toGcpBackupStorageLocation(d) + err := c.UpdateGcpBackupStorageLocation(d.Id(), bsl) + if err != nil { + return diag.FromErr(err) + } + return diags +} + +func AzureBackupStorageLocationUpdate(d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + var diags diag.Diagnostics + bsl := toAzureBackupStorageLocation(d) + err := c.UpdateAzureBackupStorageLocation(d.Id(), bsl) + if err != nil { + return diag.FromErr(err) + } + return diags +} + func toS3BackupStorageLocation(d *schema.ResourceData) *models.V1UserAssetsLocationS3 { bucketName := d.Get("bucket_name").(string) region := d.Get("region").(string) @@ -161,6 +395,81 @@ func toMinioBackupStorageLocation(d *schema.ResourceData) *models.V1UserAssetsLo } } +func toGcpBackupStorageLocation(d *schema.ResourceData) *models.V1UserAssetsLocationGcp { + var account *models.V1UserAssetsLocationGcp + gcpCred := d.Get("gcp_storage_config").([]interface{})[0].(map[string]interface{}) + if len(gcpCred) > 0 { + bslName := d.Get("name").(string) + isDefault := d.Get("is_default").(bool) + bucketName := 
d.Get("bucket_name").(string) + projectId := gcpCred["project_id"].(string) + jsonCred := gcpCred["gcp_json_credentials"].(string) + account = &models.V1UserAssetsLocationGcp{ + Metadata: &models.V1ObjectMetaInputEntity{ + Annotations: nil, + Labels: nil, + Name: bslName, + }, + Spec: &models.V1UserAssetsLocationGcpSpec{ + Config: &models.V1GcpStorageConfig{ + BucketName: &bucketName, + Credentials: &models.V1GcpAccountEntitySpec{ + JSONCredentials: jsonCred, + }, + ProjectID: projectId, + }, + IsDefault: isDefault, + Type: "gcp", + }, + } + return account + } + + return nil +} + +func toAzureBackupStorageLocation(d *schema.ResourceData) *models.V1UserAssetsLocationAzure { + var account *models.V1UserAssetsLocationAzure + azureCred := d.Get("azure_storage_config").([]interface{})[0].(map[string]interface{}) + if len(azureCred) > 0 { + bslName := d.Get("name").(string) + isDefault := d.Get("is_default").(bool) + containerName := azureCred["project_id"].(string) + storageName := azureCred["storage_name"].(string) + sku := azureCred["stock_keeping_unit"].(string) + resourceGroup := azureCred["resource_group"].(string) + azTenantId := azureCred["azure_tenant_id"].(string) + azClientId := azureCred["azure_client_id"].(string) + azClientSecret := azureCred["azure_client_secret"].(string) + subId := azureCred["subscription_id"].(string) + account = &models.V1UserAssetsLocationAzure{ + Metadata: &models.V1ObjectMetaInputEntity{ + Name: bslName, + }, + Spec: &models.V1UserAssetsLocationAzureSpec{ + Config: &models.V1AzureStorageConfig{ + ContainerName: &containerName, + Credentials: &models.V1AzureAccountEntitySpec{ + ClientCloud: ptr.StringPtr("public"), + ClientID: azClientId, + ClientSecret: azClientSecret, + SubscriptionID: subId, + TenantID: azTenantId, + }, + ResourceGroup: &resourceGroup, + Sku: sku, + StorageName: &storageName, + }, + IsDefault: isDefault, + Type: "azure", + }, + } + + return account + } + return nil +} + func toAwsAccountCredential(s3cred 
map[string]interface{}) *models.V1AwsCloudAccount { account := &models.V1AwsCloudAccount{} if len(s3cred["credential_type"].(string)) == 0 || s3cred["credential_type"].(string) == "secret" { diff --git a/spectrocloud/resource_backup_storage_location.go b/spectrocloud/resource_backup_storage_location.go index 6fb3553a..886bb725 100644 --- a/spectrocloud/resource_backup_storage_location.go +++ b/spectrocloud/resource_backup_storage_location.go @@ -2,7 +2,6 @@ package spectrocloud import ( "context" - "fmt" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -51,7 +50,8 @@ func resourceBackupStorageLocation() *schema.Resource { }, "is_default": { Type: schema.TypeBool, - Required: true, + Optional: true, + Default: false, Description: "Specifies if this backup storage location should be used as the default location for storing backups.", }, "region": { @@ -173,6 +173,11 @@ func resourceBackupStorageLocation() *schema.Resource { Required: true, Description: "Unique client Id from Azure console.", }, + "subscription_id": { + Type: schema.TypeString, + Required: true, + Description: "Unique subscription Id from Azure console.", + }, "azure_client_secret": { Type: schema.TypeString, Required: true, @@ -187,30 +192,9 @@ func resourceBackupStorageLocation() *schema.Resource { } } -func schemaValidationForLocationProvider(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { - provider := d.Get("location_provider").(string) - if (provider == "aws" || provider == "minio") && (len(d.Get("s3").([]interface{})) == 0 || d.Get("bucket_name").(string) == "" || d.Get("region").(string) == "") { - return fmt.Errorf("`s3, bucket_name & region` is required when location provider set to 'aws' or 'minio'") - } - if (provider == "aws" || provider == "minio") && (len(d.Get("azure_storage_config").([]interface{})) != 0 || (len(d.Get("gcp_storage_config").([]interface{}))) != 0) { - return fmt.Errorf("`gcp_storage_config & azure_storage_config` are not 
allowed when location provider set to 'aws' or 'minio'") - } - if (provider == "gcp") && (len(d.Get("gcp_storage_config").([]interface{})) == 0 || d.Get("bucket_name").(string) == "") { - return fmt.Errorf("`gcp_storage_config & bucket_name` is required when location provider set to 'gcp'") - } - if (provider == "azure") && len(d.Get("azure_storage_config").([]interface{})) == 0 { - return fmt.Errorf("`azure_storage_config` is required when location provider set to 'azure'") - } - if (provider == "gcp" || provider == "azure") && (len(d.Get("s3").([]interface{})) != 0 || d.Get("bucket_name").(string) != "" || d.Get("region").(string) != "" || d.Get("ca_cert").(string) != "") { - return fmt.Errorf("`s3, bucket_name, region & ca_cert` are not allowed when location provider set to 'gcp' or 'azure'") - } - return nil -} - func resourceBackupStorageLocationCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { assetContext := d.Get("context").(string) c := getV1ClientWithResourceContext(m, assetContext) - var diags diag.Diagnostics locationProvider := d.Get("location_provider").(string) switch locationProvider { @@ -219,57 +203,52 @@ func resourceBackupStorageLocationCreate(ctx context.Context, d *schema.Resource case "minio": return MinioBackupStorageLocationCreate(d, c) case "gcp": - fmt.Println("gcp") + return GcpBackupStorageLocationCreate(d, c) case "azure": - fmt.Println("azure") + return AzureBackupStorageLocationCreate(d, c) default: return S3BackupStorageLocationCreate(d, c) } - - return diags } func resourceBackupStorageLocationRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { assetContext := d.Get("context").(string) c := getV1ClientWithResourceContext(m, assetContext) - var diags diag.Diagnostics locationProvider := d.Get("location_provider").(string) switch locationProvider { case "aws": return S3BackupStorageLocationRead(d, c) case "minio": - fmt.Println("minio") + return 
MinioBackupStorageLocationRead(d, c) case "gcp": - fmt.Println("gcp") + return GcpBackupStorageLocationRead(d, c) case "azure": - fmt.Println("azure") + return AzureBackupStorageLocationRead(d, c) default: return S3BackupStorageLocationRead(d, c) } - - return diags } func resourceBackupStorageLocationUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { assetContext := d.Get("context").(string) c := getV1ClientWithResourceContext(m, assetContext) - var diags diag.Diagnostics + locationProvider := d.Get("location_provider").(string) switch locationProvider { case "aws": return S3BackupStorageLocationUpdate(d, c) case "minio": - fmt.Println("minio") + return MinioBackupStorageLocationUpdate(d, c) case "gcp": - fmt.Println("gcp") + return GcpBackupStorageLocationUpdate(d, c) case "azure": - fmt.Println("azure") + return AzureBackupStorageLocationUpdate(d, c) default: return S3BackupStorageLocationUpdate(d, c) } - return diags + } func resourceBackupStorageLocationDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {