From 72681c195114991e943da961977c179fa204d813 Mon Sep 17 00:00:00 2001 From: Sivaanand Murugesan Date: Thu, 10 Aug 2023 21:27:45 +0530 Subject: [PATCH 1/4] PLT-587:Added Node repave interval support for all infra cluster (#322) * draft * PLT-587:Added Node repave interval support for all infra cluster * removed trace from provider.tf * reverting example changes * typo fix * reverted go.mod * fix go.mod * adding updated go.mod * Update spectrocloud/resource_cluster_aws.go Co-authored-by: nikchern * Update spectrocloud/resource_cluster_coxedge.go Co-authored-by: nikchern * Update spectrocloud/resource_cluster_azure.go Co-authored-by: nikchern * Update spectrocloud/resource_cluster_edge_native.go Co-authored-by: nikchern * Update spectrocloud/resource_cluster_edge_vsphere.go Co-authored-by: nikchern * Update spectrocloud/resource_cluster_gcp.go Co-authored-by: nikchern * Update spectrocloud/resource_cluster_libvirt.go Co-authored-by: nikchern * Update spectrocloud/resource_cluster_maas.go Co-authored-by: nikchern * Update spectrocloud/resource_cluster_openstack.go Co-authored-by: nikchern * Update spectrocloud/resource_cluster_vsphere.go Co-authored-by: nikchern * PLT-587: Fixing unit test. * PLT-587: Refactor hash function with interval. * Fixed Description * Completed unit tests * PLT-587: update to latest SDK. * PLT-587: docs refresh. * cp-fix * PLT-587: refresh docs. * replace flatten method to common golder * Added node_repave_validation for control plane * fix go vet issue * go vet fixess * refreshed the docs * updated error message * PEM-2850: Updated SSH_KEYS as optional attribute in edge_native (#324) * PLT-587: updating hapi and sdk dependencies. * PLT-587: remove commented code. * fixed validation issue * Fixed descrition and updated the document * PLT-587: updating dependencies. * PLT-587: updating documentation. * PLT-587: updating documentation. 
* Removed node_repave_interval for cox,edge_native, edge_vsphere, libvirt --------- Co-authored-by: nikchern --- docs/index.md | 27 +- docs/resources/cloudaccount_maas.md | 17 +- docs/resources/cloudaccount_openstack.md | 29 +- docs/resources/cluster_aws.md | 123 ++-- docs/resources/cluster_azure.md | 149 ++-- docs/resources/cluster_gcp.md | 1 + docs/resources/cluster_maas.md | 120 ++-- docs/resources/cluster_openstack.md | 118 ++-- docs/resources/cluster_vsphere.md | 158 +++-- examples/provider/provider.tf | 3 +- .../spectrocloud_cluster_vsphere/resource.tf | 63 ++ go.sum | 604 ---------------- spectrocloud/cluster_common_hash.go | 368 +++++----- spectrocloud/cluster_common_hash_test.go | 651 ++++++++++++++++++ spectrocloud/cluster_common_taints.go | 4 +- spectrocloud/cluster_node_common.go | 119 ++++ spectrocloud/provider.go | 2 + spectrocloud/resource_cluster_aks.go | 2 +- spectrocloud/resource_cluster_aws.go | 501 ++++++-------- .../resource_cluster_aws_expand_test.go | 79 +++ spectrocloud/resource_cluster_azure.go | 515 +++++--------- spectrocloud/resource_cluster_edge_vsphere.go | 422 ++++-------- spectrocloud/resource_cluster_eks.go | 2 +- spectrocloud/resource_cluster_gcp.go | 413 ++++------- spectrocloud/resource_cluster_libvirt.go | 4 +- spectrocloud/resource_cluster_maas.go | 438 ++++-------- spectrocloud/resource_cluster_openstack.go | 415 ++++------- spectrocloud/resource_cluster_tke.go | 2 +- spectrocloud/resource_cluster_vsphere.go | 630 ++++++++--------- templates/index.md.tmpl | 20 +- 30 files changed, 2644 insertions(+), 3355 deletions(-) create mode 100644 examples/resources/spectrocloud_cluster_vsphere/resource.tf delete mode 100644 go.sum create mode 100644 spectrocloud/cluster_common_hash_test.go create mode 100644 spectrocloud/cluster_node_common.go create mode 100644 spectrocloud/resource_cluster_aws_expand_test.go diff --git a/docs/index.md b/docs/index.md index 398d9720..ce5b4bbc 100644 --- a/docs/index.md +++ b/docs/index.md @@ -38,8 +38,7 @@ terraform { provider "spectrocloud" { host = var.sc_host # Spectro Cloud endpoint (defaults to api.spectrocloud.com) - username = var.sc_username # Username of the user (or specify with SPECTROCLOUD_USERNAME env var) - password = var.sc_password # Password (or specify with SPECTROCLOUD_PASSWORD env var) + api_key = var.sc_api_key # API key (or specify with SPECTROCLOUD_APIKEY env var) project_name = var.sc_project_name # Project name (e.g: Default) } ``` @@ -57,7 +56,7 @@ sc_project_name = "{enter Spectro Cloud project Name}" #e.g: Default ``` -> -Be sure to populate the `username`, `password`, and other terraform vars. +Be sure to populate the `sc_host`, `sc_api_key`, and other terraform vars. Copy one of the resource configuration files (e.g: spectrocloud_cluster_profile) from the _Resources_ documentation. Be sure to specify all required parameters. @@ -75,28 +74,14 @@ For an end-to-end example of provisioning Spectro Cloud resources, visit: Credentials and other configurations can be provided through environment variables. The following environment variables are availabe. -- `SPECTROCLOUD_HOST` -- `SPECTROCLOUD_USERNAME` -- `SPECTROCLOUD_PASSWORD` +- `SPECTROCLOUD_HOST` - `SPECTROCLOUD_APIKEY` - `SPECTROCLOUD_TRACE` - `SPECTROCLOUD_RETRY_ATTEMPTS` ## Authentication -You can use the environment variables to authenticate with Spectro Cloud with your username and password. - --> **Note:** The API key takes precendence over the username and password authentication flow. 
- -```shell -export SPECTROCLOUD_USERNAME=myUserName -export SPECTROCLOUD_PASSWORD=myPassword -``` -```hcl -provider "spectrocloud" {} -``` - -Alternatively, you may use an API key to authenticate with Spectro Cloud. Visit the User Management API Key [documentation](https://docs.spectrocloud.com/user-management/user-authentication/#usingapikey) to learn more about Spectro Cloud API keys. +You can use an API key to authenticate with Spectro Cloud. Visit the User Management API Key [documentation](https://docs.spectrocloud.com/user-management/user-authentication/#usingapikey) to learn more about Spectro Cloud API keys. ```shell export SPECTROCLOUD_APIKEY=5b7aad......... ``` @@ -117,8 +102,8 @@ provider GitHub [discussion board](https://github.com/spectrocloud/terraform-pro - `api_key` (String, Sensitive) The Spectro Cloud API key. Can also be set with the `SPECTROCLOUD_API_KEY` environment variable. - `host` (String) The Spectro Cloud API host url. Can also be set with the `SPECTROCLOUD_HOST` environment variable. Defaults to https://api.spectrocloud.com - `ignore_insecure_tls_error` (Boolean) Ignore insecure TLS errors for Spectro Cloud API endpoints. Defaults to false. -- `password` (String, Sensitive) The Spectro Cloud user password. Can also be set with the `SPECTROCLOUD_PASSWORD` environment variable. +- `password` (String, Sensitive, Deprecated) The Spectro Cloud user password. Can also be set with the `SPECTROCLOUD_PASSWORD` environment variable. - `project_name` (String) The Spectro Cloud project name. - `retry_attempts` (Number) Number of retry attempts. Can also be set with the `SPECTROCLOUD_RETRY_ATTEMPTS` environment variable. Defaults to 10. - `trace` (Boolean) Enable HTTP request tracing. Can also be set with the `SPECTROCLOUD_TRACE` environment variable. To enable Terraform debug logging, set `TF_LOG=DEBUG`. Visit the Terraform documentation to learn more about Terraform [debugging](https://developer.hashicorp.com/terraform/plugin/log/managing). -- `username` (String) The Spectro Cloud username. Can also be set with the `SPECTROCLOUD_USERNAME` environment variable. +- `username` (String, Deprecated) The Spectro Cloud username. Can also be set with the `SPECTROCLOUD_USERNAME` environment variable. diff --git a/docs/resources/cloudaccount_maas.md b/docs/resources/cloudaccount_maas.md index 252428bf..42bf6f6a 100644 --- a/docs/resources/cloudaccount_maas.md +++ b/docs/resources/cloudaccount_maas.md @@ -1,5 +1,4 @@ --- -# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cloudaccount_maas Resource - terraform-provider-spectrocloud" subcategory: "" description: |- @@ -8,7 +7,7 @@ description: |- # spectrocloud_cloudaccount_maas (Resource) - + ## Example Usage @@ -20,21 +19,21 @@ resource "spectrocloud_cloudaccount_maas" "maas-1" { } ``` + ## Schema ### Required -- `name` (String) +- `maas_api_endpoint` (String) Endpoint of the MAAS API that is used to connect to the MAAS cloud. I.e. http://maas:5240/MAAS +- `maas_api_key` (String, Sensitive) API key that is used to connect to the MAAS cloud. +- `name` (String) Name of the MAAS cloud account. +- `private_cloud_gateway_id` (String) ID of the private cloud gateway that is used to connect to the MAAS cloud. ### Optional -- `maas_api_endpoint` (String) -- `maas_api_key` (String, Sensitive) -- `private_cloud_gateway_id` (String) +- `context` (String) The context of the MAAS configuration. Can be `project` or `tenant`. ### Read-Only -- `id` (String) The ID of this resource. 
- - +- `id` (String) The ID of this resource. \ No newline at end of file diff --git a/docs/resources/cloudaccount_openstack.md b/docs/resources/cloudaccount_openstack.md index 40a21424..eeee66b6 100644 --- a/docs/resources/cloudaccount_openstack.md +++ b/docs/resources/cloudaccount_openstack.md @@ -1,5 +1,4 @@ --- -# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cloudaccount_openstack Resource - terraform-provider-spectrocloud" subcategory: "" description: |- @@ -8,7 +7,7 @@ description: |- # spectrocloud_cloudaccount_openstack (Resource) - + ## Example Usage @@ -25,27 +24,27 @@ resource "spectrocloud_cloudaccount_openstack" "account" { } ``` + ## Schema ### Required -- `default_domain` (String) -- `default_project` (String) -- `identity_endpoint` (String) -- `name` (String) -- `openstack_password` (String, Sensitive) -- `openstack_username` (String) -- `parent_region` (String) -- `private_cloud_gateway_id` (String) +- `default_domain` (String) The default domain of the OpenStack cloud that is used to connect to the OpenStack cloud. +- `default_project` (String) The default project of the OpenStack cloud that is used to connect to the OpenStack cloud. +- `identity_endpoint` (String) The identity endpoint of the OpenStack cloud that is used to connect to the OpenStack cloud. +- `name` (String) Name of the OpenStack cloud account. +- `openstack_password` (String, Sensitive) The password of the OpenStack cloud that is used to connect to the OpenStack cloud. +- `openstack_username` (String) The username of the OpenStack cloud that is used to connect to the OpenStack cloud. +- `parent_region` (String) The parent region of the OpenStack cloud that is used to connect to the OpenStack cloud. +- `private_cloud_gateway_id` (String) ID of the private cloud gateway that is used to connect to the OpenStack cloud. ### Optional -- `ca_certificate` (String) -- `openstack_allow_insecure` (Boolean) +- `ca_certificate` (String) The CA certificate of the OpenStack cloud that is used to connect to the OpenStack cloud. +- `context` (String) The context of the OpenStack configuration. Can be `project` or `tenant`. +- `openstack_allow_insecure` (Boolean) Whether to allow insecure connections to the OpenStack cloud. Default is `false`. ### Read-Only -- `id` (String) The ID of this resource. - - +- `id` (String) The ID of this resource. \ No newline at end of file diff --git a/docs/resources/cluster_aws.md b/docs/resources/cluster_aws.md index 0abcf44d..972cd950 100644 --- a/docs/resources/cluster_aws.md +++ b/docs/resources/cluster_aws.md @@ -1,17 +1,17 @@ --- -# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cluster_aws Resource - terraform-provider-spectrocloud" subcategory: "" description: |- - + Resource for managing AWS clusters in Spectro Cloud through Palette. --- # spectrocloud_cluster_aws (Resource) - + Resource for managing AWS clusters in Spectro Cloud through Palette. 
## Example Usage + ```terraform data "spectrocloud_cloudaccount_aws" "account" { # id = @@ -119,6 +119,7 @@ resource "spectrocloud_cluster_aws" "cluster" { } ``` + ## Schema @@ -134,24 +135,23 @@ resource "spectrocloud_cluster_aws" "cluster" { - `apply_setting` (String) - `backup_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--backup_policy)) - `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) -- `cluster_profile_id` (String, Deprecated) - `cluster_rbac_binding` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding)) +- `context` (String) - `host_config` (Block List) (see [below for nested schema](#nestedblock--host_config)) - `namespaces` (Block List) (see [below for nested schema](#nestedblock--namespaces)) -- `os_patch_after` (String) -- `os_patch_on_boot` (Boolean) -- `os_patch_schedule` (String) -- `pack` (Block List) (see [below for nested schema](#nestedblock--pack)) +- `os_patch_after` (String) Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00` +- `os_patch_on_boot` (Boolean) Whether to apply OS patch on boot. Default is `false`. +- `os_patch_schedule` (String) The cron schedule for OS patching. This must be in the form of cron syntax. Ex: `0 0 * * *`. - `scan_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy)) -- `skip_completion` (Boolean) -- `tags` (Set of String) +- `skip_completion` (Boolean) If `true`, the cluster will be created asynchronously. Default value is `false`. +- `tags` (Set of String) A list of tags to be applied to the cluster. Tags must be in the form of `key:value`. - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only -- `cloud_config_id` (String) +- `cloud_config_id` (String, Deprecated) ID of the cloud config used for the cluster. This cloud config must be of type `aws`. - `id` (String) The ID of this resource. -- `kubeconfig` (String) +- `kubeconfig` (String) Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`. - `location_config` (List of Object) (see [below for nested schema](#nestedatt--location_config)) @@ -172,31 +172,35 @@ Required: -- `count` (Number) +- `count` (Number) Number of nodes in the machine pool. - `instance_type` (String) - `name` (String) Optional: - `additional_labels` (Map of String) +- `additional_security_groups` (Set of String) Additional security groups to attach to the instance. - `az_subnets` (Map of String) Mutually exclusive with `azs`. Use `az_subnets` for Static provisioning. - `azs` (Set of String) Mutually exclusive with `az_subnets`. Use `azs` for Dynamic provisioning. - `capacity_type` (String) Capacity type is an instance type, can be 'on-demand' or 'spot'. Defaults to 'on-demand'. -- `control_plane` (Boolean) -- `control_plane_as_worker` (Boolean) +- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`. +- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`. - `disk_size_gb` (Number) +- `max` (Number) Maximum number of nodes in the machine pool. This is used for autoscaling the machine pool. - `max_price` (String) +- `min` (Number) Minimum number of nodes in the machine pool. This is used for autoscaling the machine pool. +- `node_repave_interval` (Number) Minimum number of seconds node should be Ready, before the next node is selected for repave.
Default value is `0`, Applicable only for worker pools. - `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints)) -- `update_strategy` (String) +- `update_strategy` (String) Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`. ### Nested Schema for `machine_pool.taints` Required: -- `effect` (String) -- `key` (String) -- `value` (String) +- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`. +- `key` (String) The key of the taint. +- `value` (String) The value of the taint. @@ -205,51 +209,58 @@ Required: Required: -- `backup_location_id` (String) -- `expiry_in_hour` (Number) -- `prefix` (String) -- `schedule` (String) +- `backup_location_id` (String) The ID of the backup location to use for the backup. +- `expiry_in_hour` (Number) The number of hours after which the backup will be deleted. For example, if the expiry is set to 24, the backup will be deleted after 24 hours. +- `prefix` (String) Prefix for the backup name. The backup name will be of the format --. +- `schedule` (String) The schedule for the backup. The schedule is specified in cron format. For example, to run the backup every day at 1:00 AM, the schedule should be set to `0 1 * * *`. Optional: -- `include_cluster_resources` (Boolean) -- `include_disks` (Boolean) -- `namespaces` (Set of String) +- `cluster_uids` (Set of String) The list of cluster UIDs to include in the backup. If `include_all_clusters` is set to `true`, then all clusters will be included. +- `include_all_clusters` (Boolean) Whether to include all clusters in the backup. If set to false, only the clusters specified in `cluster_uids` will be included. +- `include_cluster_resources` (Boolean) Whether to include the cluster resources in the backup. If set to false, only the cluster configuration and disks will be backed up. +- `include_disks` (Boolean) Whether to include the disks in the backup. If set to false, only the cluster configuration will be backed up. +- `namespaces` (Set of String) The list of Kubernetes namespaces to include in the backup. If not specified, all namespaces will be included. ### Nested Schema for `cluster_profile` -Optional: +Required: -- `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) +- `id` (String) The ID of the cluster profile. -Read-Only: +Optional: -- `id` (String) The ID of this resource. +- `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) ### Nested Schema for `cluster_profile.pack` Required: -- `name` (String) -- `values` (String) +- `name` (String) The name of the pack. The name must be unique within the cluster profile. Optional: - `manifest` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack--manifest)) -- `registry_uid` (String) -- `tag` (String) -- `type` (String) +- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry. +- `tag` (String) The tag of the pack. The tag is the version of the pack. +- `type` (String) The type of the pack. The default value is `spectro`. +- `uid` (String) +- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format. ### Nested Schema for `cluster_profile.pack.manifest` Required: -- `content` (String) -- `name` (String) +- `content` (String) The content of the manifest. 
The content is the YAML content of the manifest. +- `name` (String) The name of the manifest. The name must be unique within the pack. + +Read-Only: + +- `uid` (String) @@ -259,12 +270,12 @@ Required: Required: -- `type` (String) +- `type` (String) The type of the RBAC binding. Can be one of the following values: `RoleBinding`, or `ClusterRoleBinding`. Optional: -- `namespace` (String) -- `role` (Map of String) +- `namespace` (String) The Kubernetes namespace of the RBAC binding. Required if 'type' is set to 'RoleBinding'. +- `role` (Map of String) The role of the RBAC binding. Required if 'type' is set to 'RoleBinding'. - `subjects` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding--subjects)) @@ -272,12 +283,12 @@ Optional: Required: -- `name` (String) -- `type` (String) +- `name` (String) The name of the subject. Required if 'type' is set to 'User' or 'Group'. +- `type` (String) The type of the subject. Can be one of the following values: `User`, `Group`, or `ServiceAccount`. Optional: -- `namespace` (String) +- `namespace` (String) The Kubernetes namespace of the subject. Required if 'type' is set to 'ServiceAccount'. @@ -297,22 +308,12 @@ Optional: Required: -- `name` (String) -- `resource_allocation` (Map of String) - - - -### Nested Schema for `pack` - -Required: - -- `name` (String) -- `tag` (String) -- `values` (String) +- `name` (String) Name of the namespace. This is the name of the Kubernetes namespace in the cluster. +- `resource_allocation` (Map of String) Resource allocation for the namespace. This is a map containing the resource type and the resource value. For example, `{cpu_cores: '2', memory_MiB: '2048'}` Optional: -- `registry_uid` (String) +- `images_blacklist` (List of String) List of images to disallow for the namespace. For example, `['nginx:latest', 'redis:latest']` @@ -320,9 +321,9 @@ Optional: Required: -- `configuration_scan_schedule` (String) -- `conformance_scan_schedule` (String) -- `penetration_scan_schedule` (String) +- `configuration_scan_schedule` (String) The schedule for configuration scan. +- `conformance_scan_schedule` (String) The schedule for conformance scan. +- `penetration_scan_schedule` (String) The schedule for penetration scan. @@ -345,6 +346,4 @@ Read-Only: - `latitude` (Number) - `longitude` (Number) - `region_code` (String) -- `region_name` (String) - - +- `region_name` (String) \ No newline at end of file diff --git a/docs/resources/cluster_azure.md b/docs/resources/cluster_azure.md index c2920b11..10977328 100644 --- a/docs/resources/cluster_azure.md +++ b/docs/resources/cluster_azure.md @@ -1,14 +1,13 @@ --- -# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cluster_azure Resource - terraform-provider-spectrocloud" subcategory: "" description: |- - + Resource for managing Azure clusters in Spectro Cloud through Palette. --- # spectrocloud_cluster_azure (Resource) - + Resource for managing Azure clusters in Spectro Cloud through Palette. ## Example Usage @@ -82,38 +81,40 @@ resource "spectrocloud_cluster_azure" "cluster" { } ``` + + ## Schema ### Required -- `cloud_account_id` (String) +- `cloud_account_id` (String) ID of the cloud account to be used for the cluster. This cloud account must be of type `azure`. - `cloud_config` (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--cloud_config)) - `machine_pool` (Block Set, Min: 1) (see [below for nested schema](#nestedblock--machine_pool)) -- `name` (String) +- `name` (String) Name of the cluster. 
This name will be used to create the cluster in Azure. ### Optional -- `apply_setting` (String) +- `apply_setting` (String) Apply setting for the cluster. This can be set to `on_create` or `on_update`. - `backup_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--backup_policy)) - `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) - `cluster_rbac_binding` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding)) +- `context` (String) - `host_config` (Block List) (see [below for nested schema](#nestedblock--host_config)) - `namespaces` (Block List) (see [below for nested schema](#nestedblock--namespaces)) -- `os_patch_after` (String) -- `os_patch_on_boot` (Boolean) -- `os_patch_schedule` (String) -- `pack` (Block List) (see [below for nested schema](#nestedblock--pack)) +- `os_patch_after` (String) Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00` +- `os_patch_on_boot` (Boolean) Whether to apply OS patch on boot. Default is `false`. +- `os_patch_schedule` (String) Cron schedule for OS patching. This must be in the form of `0 0 * * *`. - `scan_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy)) -- `skip_completion` (Boolean) -- `tags` (Set of String) +- `skip_completion` (Boolean) If `true`, the cluster will be created asynchronously. Default value is `false`. +- `tags` (Set of String) A list of tags to be applied to the cluster. Tags must be in the form of `key:value`. - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only -- `cloud_config_id` (String) +- `cloud_config_id` (String, Deprecated) ID of the cloud config used for the cluster. This cloud config must be of type `azure`. - `id` (String) The ID of this resource. -- `kubeconfig` (String) +- `kubeconfig` (String) Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`. - `location_config` (List of Object) (see [below for nested schema](#nestedatt--location_config)) @@ -121,10 +122,10 @@ resource "spectrocloud_cluster_azure" "cluster" { Required: -- `region` (String) -- `resource_group` (String) -- `ssh_key` (String) -- `subscription_id` (String) +- `region` (String) Azure region. This can be found in the Azure portal under `Resource groups`. +- `resource_group` (String) Azure resource group. This can be found in the Azure portal under `Resource groups`. +- `ssh_key` (String) SSH key to be used for the cluster nodes. +- `subscription_id` (String) Azure subscription ID. This can be found in the Azure portal under `Subscriptions`. @@ -132,29 +133,30 @@ Required: Required: -- `azs` (Set of String) -- `count` (Number) -- `instance_type` (String) -- `is_system_node_pool` (Boolean) -- `name` (String) +- `azs` (Set of String) Availability zones for the machine pool. +- `count` (Number) Number of nodes in the machine pool. +- `instance_type` (String) Azure instance type from the Azure portal. +- `name` (String) Name of the machine pool. This must be unique within the cluster. Optional: - `additional_labels` (Map of String) -- `control_plane` (Boolean) -- `control_plane_as_worker` (Boolean) -- `disk` (Block List, Max: 1) (see [below for nested schema](#nestedblock--machine_pool--disk)) -- `os_type` (String) +- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`. +- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`. 
+- `disk` (Block List, Max: 1) Disk configuration for the machine pool. (see [below for nested schema](#nestedblock--machine_pool--disk)) +- `is_system_node_pool` (Boolean) Whether this machine pool is a system node pool. Default value is `false`. +- `node_repave_interval` (Number) Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools. +- `os_type` (String) Operating system type for the machine pool. Valid values are `Linux` and `Windows`. Defaults to `Linux`. - `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints)) - `update_strategy` (String) +- `update_strategy` (String) Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`. ### Nested Schema for `machine_pool.disk` Required: -- `size_gb` (Number) -- `type` (String) +- `size_gb` (Number) Size of the disk in GB. +- `type` (String) Type of the disk. Valid values are `Standard_LRS`, `StandardSSD_LRS`, `Premium_LRS`. ### Nested Schema for `machine_pool.taints` Required: -- `effect` (String) -- `key` (String) -- `value` (String) +- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`. +- `key` (String) The key of the taint. +- `value` (String) The value of the taint. @@ -173,51 +175,58 @@ Required: Required: -- `backup_location_id` (String) -- `expiry_in_hour` (Number) -- `prefix` (String) -- `schedule` (String) +- `backup_location_id` (String) The ID of the backup location to use for the backup. +- `expiry_in_hour` (Number) The number of hours after which the backup will be deleted. For example, if the expiry is set to 24, the backup will be deleted after 24 hours. +- `prefix` (String) Prefix for the backup name. The backup name will be of the format --. +- `schedule` (String) The schedule for the backup. The schedule is specified in cron format. For example, to run the backup every day at 1:00 AM, the schedule should be set to `0 1 * * *`. Optional: -- `include_cluster_resources` (Boolean) -- `include_disks` (Boolean) -- `namespaces` (Set of String) +- `cluster_uids` (Set of String) The list of cluster UIDs to include in the backup. If `include_all_clusters` is set to `true`, then all clusters will be included. +- `include_all_clusters` (Boolean) Whether to include all clusters in the backup. If set to false, only the clusters specified in `cluster_uids` will be included. +- `include_cluster_resources` (Boolean) Whether to include the cluster resources in the backup. If set to false, only the cluster configuration and disks will be backed up. +- `include_disks` (Boolean) Whether to include the disks in the backup. If set to false, only the cluster configuration will be backed up. +- `namespaces` (Set of String) The list of Kubernetes namespaces to include in the backup. If not specified, all namespaces will be included. ### Nested Schema for `cluster_profile` -Optional: +Required: -- `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) +- `id` (String) The ID of the cluster profile. -Read-Only: +Optional: -- `id` (String) The ID of this resource. +- `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) ### Nested Schema for `cluster_profile.pack` Required: -- `name` (String) -- `values` (String) +- `name` (String) The name of the pack. The name must be unique within the cluster profile.
Optional: - `manifest` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack--manifest)) -- `registry_uid` (String) -- `tag` (String) -- `type` (String) +- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry. +- `tag` (String) The tag of the pack. The tag is the version of the pack. +- `type` (String) The type of the pack. The default value is `spectro`. +- `uid` (String) +- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format. ### Nested Schema for `cluster_profile.pack.manifest` Required: -- `content` (String) -- `name` (String) +- `content` (String) The content of the manifest. The content is the YAML content of the manifest. +- `name` (String) The name of the manifest. The name must be unique within the pack. + +Read-Only: + +- `uid` (String) @@ -227,12 +236,12 @@ Required: Required: -- `type` (String) +- `type` (String) The type of the RBAC binding. Can be one of the following values: `RoleBinding`, or `ClusterRoleBinding`. Optional: -- `namespace` (String) -- `role` (Map of String) +- `namespace` (String) The Kubernetes namespace of the RBAC binding. Required if 'type' is set to 'RoleBinding'. +- `role` (Map of String) The role of the RBAC binding. Required if 'type' is set to 'RoleBinding'. - `subjects` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding--subjects)) @@ -240,12 +249,12 @@ Optional: Required: -- `name` (String) -- `type` (String) +- `name` (String) The name of the subject. Required if 'type' is set to 'User' or 'Group'. +- `type` (String) The type of the subject. Can be one of the following values: `User`, `Group`, or `ServiceAccount`. Optional: -- `namespace` (String) +- `namespace` (String) The Kubernetes namespace of the subject. Required if 'type' is set to 'ServiceAccount'. @@ -265,22 +274,12 @@ Optional: Required: -- `name` (String) -- `resource_allocation` (Map of String) - - - -### Nested Schema for `pack` - -Required: - -- `name` (String) -- `tag` (String) -- `values` (String) +- `name` (String) Name of the namespace. This is the name of the Kubernetes namespace in the cluster. +- `resource_allocation` (Map of String) Resource allocation for the namespace. This is a map containing the resource type and the resource value. For example, `{cpu_cores: '2', memory_MiB: '2048'}` Optional: -- `registry_uid` (String) +- `images_blacklist` (List of String) List of images to disallow for the namespace. For example, `['nginx:latest', 'redis:latest']` @@ -288,9 +287,9 @@ Optional: Required: -- `configuration_scan_schedule` (String) -- `conformance_scan_schedule` (String) -- `penetration_scan_schedule` (String) +- `configuration_scan_schedule` (String) The schedule for configuration scan. +- `conformance_scan_schedule` (String) The schedule for conformance scan. +- `penetration_scan_schedule` (String) The schedule for penetration scan. 
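To make the cron-based scan schedules above concrete, a minimal `scan_policy` sketch might look like the following (the schedule values are illustrative placeholders, not recommendations):

```terraform
scan_policy {
  configuration_scan_schedule = "0 11 * * *" # run the configuration scan daily at 11:00 AM
  penetration_scan_schedule   = "0 11 * * *" # run the penetration scan daily at 11:00 AM
  conformance_scan_schedule   = "0 0 1 * *"  # run the conformance scan on the 1st of every month
}
```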
@@ -313,6 +312,4 @@ Read-Only: - `latitude` (Number) - `longitude` (Number) - `region_code` (String) -- `region_name` (String) - - +- `region_name` (String) \ No newline at end of file diff --git a/docs/resources/cluster_gcp.md b/docs/resources/cluster_gcp.md index 9f27beb1..befe0f48 100644 --- a/docs/resources/cluster_gcp.md +++ b/docs/resources/cluster_gcp.md @@ -142,6 +142,7 @@ Optional: - `control_plane` (Boolean) - `control_plane_as_worker` (Boolean) - `disk_size_gb` (Number) +- `node_repave_interval` (Number) Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools. - `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints)) - `update_strategy` (String) diff --git a/docs/resources/cluster_maas.md b/docs/resources/cluster_maas.md index 93a338ea..2b32c76e 100644 --- a/docs/resources/cluster_maas.md +++ b/docs/resources/cluster_maas.md @@ -1,14 +1,13 @@ --- -# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cluster_maas Resource - terraform-provider-spectrocloud" subcategory: "" description: |- - + Resource for managing MAAS clusters in Spectro Cloud through Palette. --- # spectrocloud_cluster_maas (Resource) - + Resource for managing MAAS clusters in Spectro Cloud through Palette. ## Example Usage @@ -89,6 +88,7 @@ resource "spectrocloud_cluster_maas" "cluster" { } ``` + ## Schema @@ -105,23 +105,23 @@ resource "spectrocloud_cluster_maas" "cluster" { - `cloud_account_id` (String) - `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) - `cluster_rbac_binding` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding)) +- `context` (String) - `host_config` (Block List) (see [below for nested schema](#nestedblock--host_config)) - `location_config` (Block List) (see [below for nested schema](#nestedblock--location_config)) - `namespaces` (Block List) (see [below for nested schema](#nestedblock--namespaces)) -- `os_patch_after` (String) -- `os_patch_on_boot` (Boolean) -- `os_patch_schedule` (String) -- `pack` (Block List) (see [below for nested schema](#nestedblock--pack)) +- `os_patch_after` (String) The date and time after which to patch the cluster. Prefix the time value with the respective RFC. Ex: `RFC3339: 2006-01-02T15:04:05Z07:00` +- `os_patch_on_boot` (Boolean) Whether to apply OS patch on boot. Default is `false`. +- `os_patch_schedule` (String) Cron schedule for OS patching. This must be in the form of `0 0 * * *`. - `scan_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy)) -- `skip_completion` (Boolean) -- `tags` (Set of String) +- `skip_completion` (Boolean) If `true`, the cluster will be created asynchronously. Default value is `false`. +- `tags` (Set of String) A list of tags to be applied to the cluster. Tags must be in the form of `key:value`. - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only -- `cloud_config_id` (String) +- `cloud_config_id` (String, Deprecated) ID of the cloud config used for the cluster. This cloud config must be of type `maas`. - `id` (String) The ID of this resource. -- `kubeconfig` (String) +- `kubeconfig` (String) Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.
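As a sketch of the OS patching attributes described above (values are illustrative; you would normally set only one of `os_patch_schedule` or `os_patch_after`):

```terraform
resource "spectrocloud_cluster_maas" "cluster" {
  # ...
  os_patch_on_boot  = true                    # patch the OS when nodes first boot
  os_patch_schedule = "0 0 * * 0"             # or patch on a cron schedule (weekly, Sunday midnight)
  # os_patch_after  = "2024-01-02T15:04:05Z"  # or patch once after an RFC3339 timestamp
}
```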
### Nested Schema for `cloud_config` @@ -137,7 +137,7 @@ Required: Required: - `azs` (Set of String) -- `count` (Number) +- `count` (Number) Number of nodes in the machine pool. - `instance_type` (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--machine_pool--instance_type)) - `name` (String) - `placement` (Block List, Min: 1) (see [below for nested schema](#nestedblock--machine_pool--placement)) @@ -145,10 +145,13 @@ Required: Optional: - `additional_labels` (Map of String) -- `control_plane` (Boolean) -- `control_plane_as_worker` (Boolean) +- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`. +- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`. +- `max` (Number) Maximum number of nodes in the machine pool. This is used for autoscaling the machine pool. +- `min` (Number) Minimum number of nodes in the machine pool. This is used for autoscaling the machine pool. +- `node_repave_interval` (Number) Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools. - `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints)) -- `update_strategy` (String) +- `update_strategy` (String) Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`. ### Nested Schema for `machine_pool.instance_type` @@ -176,9 +179,9 @@ Read-Only: Required: -- `effect` (String) -- `key` (String) -- `value` (String) +- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`. +- `key` (String) The key of the taint. +- `value` (String) The value of the taint. @@ -187,51 +190,58 @@ Required: Required: -- `backup_location_id` (String) -- `expiry_in_hour` (Number) -- `prefix` (String) -- `schedule` (String) +- `backup_location_id` (String) The ID of the backup location to use for the backup. +- `expiry_in_hour` (Number) The number of hours after which the backup will be deleted. For example, if the expiry is set to 24, the backup will be deleted after 24 hours. +- `prefix` (String) Prefix for the backup name. The backup name will be of the format --. +- `schedule` (String) The schedule for the backup. The schedule is specified in cron format. For example, to run the backup every day at 1:00 AM, the schedule should be set to `0 1 * * *`. Optional: -- `include_cluster_resources` (Boolean) -- `include_disks` (Boolean) -- `namespaces` (Set of String) +- `cluster_uids` (Set of String) The list of cluster UIDs to include in the backup. If `include_all_clusters` is set to `true`, then all clusters will be included. +- `include_all_clusters` (Boolean) Whether to include all clusters in the backup. If set to false, only the clusters specified in `cluster_uids` will be included. +- `include_cluster_resources` (Boolean) Whether to include the cluster resources in the backup. If set to false, only the cluster configuration and disks will be backed up. +- `include_disks` (Boolean) Whether to include the disks in the backup. If set to false, only the cluster configuration will be backed up. +- `namespaces` (Set of String) The list of Kubernetes namespaces to include in the backup. If not specified, all namespaces will be included. 
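Tying the `backup_policy` attributes above together, a hedged sketch (the backup location UID and all values are placeholders):

```terraform
backup_policy {
  backup_location_id        = "5eea74h5..."  # placeholder UID of an existing backup location
  prefix                    = "prod-backup"  # prefix for generated backup names
  schedule                  = "0 1 * * *"    # cron format: daily at 1:00 AM
  expiry_in_hour            = 24             # delete each backup after 24 hours
  include_disks             = true
  include_cluster_resources = true
}
```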
### Nested Schema for `cluster_profile` -Optional: +Required: -- `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) +- `id` (String) The ID of the cluster profile. -Read-Only: +Optional: -- `id` (String) The ID of this resource. +- `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) ### Nested Schema for `cluster_profile.pack` Required: -- `name` (String) -- `values` (String) +- `name` (String) The name of the pack. The name must be unique within the cluster profile. Optional: - `manifest` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack--manifest)) -- `registry_uid` (String) -- `tag` (String) -- `type` (String) +- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry. +- `tag` (String) The tag of the pack. The tag is the version of the pack. +- `type` (String) The type of the pack. The default value is `spectro`. +- `uid` (String) +- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format. ### Nested Schema for `cluster_profile.pack.manifest` Required: -- `content` (String) -- `name` (String) +- `content` (String) The content of the manifest. The content is the YAML content of the manifest. +- `name` (String) The name of the manifest. The name must be unique within the pack. + +Read-Only: + +- `uid` (String) @@ -241,12 +251,12 @@ Required: Required: -- `type` (String) +- `type` (String) The type of the RBAC binding. Can be one of the following values: `RoleBinding`, or `ClusterRoleBinding`. Optional: -- `namespace` (String) -- `role` (Map of String) +- `namespace` (String) The Kubernetes namespace of the RBAC binding. Required if 'type' is set to 'RoleBinding'. +- `role` (Map of String) The role of the RBAC binding. Required if 'type' is set to 'RoleBinding'. - `subjects` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding--subjects)) @@ -254,12 +264,12 @@ Optional: Required: -- `name` (String) -- `type` (String) +- `name` (String) The name of the subject. Required if 'type' is set to 'User' or 'Group'. +- `type` (String) The type of the subject. Can be one of the following values: `User`, `Group`, or `ServiceAccount`. Optional: -- `namespace` (String) +- `namespace` (String) The Kubernetes namespace of the subject. Required if 'type' is set to 'ServiceAccount'. @@ -295,22 +305,12 @@ Optional: Required: -- `name` (String) -- `resource_allocation` (Map of String) - - - -### Nested Schema for `pack` - -Required: - -- `name` (String) -- `tag` (String) -- `values` (String) +- `name` (String) Name of the namespace. This is the name of the Kubernetes namespace in the cluster. +- `resource_allocation` (Map of String) Resource allocation for the namespace. This is a map containing the resource type and the resource value. For example, `{cpu_cores: '2', memory_MiB: '2048'}` Optional: -- `registry_uid` (String) +- `images_blacklist` (List of String) List of images to disallow for the namespace. For example, `['nginx:latest', 'redis:latest']` @@ -318,9 +318,9 @@ Optional: Required: -- `configuration_scan_schedule` (String) -- `conformance_scan_schedule` (String) -- `penetration_scan_schedule` (String) +- `configuration_scan_schedule` (String) The schedule for configuration scan. +- `conformance_scan_schedule` (String) The schedule for conformance scan. +- `penetration_scan_schedule` (String) The schedule for penetration scan. 
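To make the `cluster_rbac_binding` schema above concrete, here is a sketch of a namespaced role binding. The `kind`/`name` keys inside the `role` map and all values are assumptions for illustration, not confirmed attribute requirements:

```terraform
cluster_rbac_binding {
  type      = "RoleBinding"
  namespace = "team-a"            # required because type is RoleBinding
  role = {
    kind = "Role"                 # assumed map keys for the role reference
    name = "admin"
  }
  subjects {
    type = "User"
    name = "dev-user@example.com" # name is required for User/Group subjects
  }
}
```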
@@ -330,6 +330,4 @@ Optional: - `create` (String) - `delete` (String) -- `update` (String) - - +- `update` (String) \ No newline at end of file diff --git a/docs/resources/cluster_openstack.md b/docs/resources/cluster_openstack.md index 9646e2df..9bffa9e2 100644 --- a/docs/resources/cluster_openstack.md +++ b/docs/resources/cluster_openstack.md @@ -1,14 +1,13 @@ --- -# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cluster_openstack Resource - terraform-provider-spectrocloud" subcategory: "" description: |- - + Resource for managing Openstack clusters in Spectro Cloud through Palette. --- # spectrocloud_cluster_openstack (Resource) - + Resource for managing Openstack clusters in Spectro Cloud through Palette. ## Example Usage @@ -80,6 +79,7 @@ resource "spectrocloud_cluster_openstack" "cluster" { } ``` + ## Schema @@ -96,23 +96,23 @@ resource "spectrocloud_cluster_openstack" "cluster" { - `backup_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--backup_policy)) - `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) - `cluster_rbac_binding` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding)) +- `context` (String) - `host_config` (Block List) (see [below for nested schema](#nestedblock--host_config)) - `location_config` (Block List) (see [below for nested schema](#nestedblock--location_config)) - `namespaces` (Block List) (see [below for nested schema](#nestedblock--namespaces)) -- `os_patch_after` (String) -- `os_patch_on_boot` (Boolean) -- `os_patch_schedule` (String) -- `pack` (Block List) (see [below for nested schema](#nestedblock--pack)) +- `os_patch_after` (String) The date and time after which to patch the cluster. Prefix the time value with the respective RFC. Ex: `RFC3339: 2006-01-02T15:04:05Z07:00` +- `os_patch_on_boot` (Boolean) Whether to apply OS patch on boot. Default is `false`. +- `os_patch_schedule` (String) Cron schedule for OS patching. This must be in the form of `0 0 * * *`. - `scan_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy)) -- `skip_completion` (Boolean) -- `tags` (Set of String) +- `skip_completion` (Boolean) If `true`, the cluster will be created asynchronously. Default value is `false`. +- `tags` (Set of String) A list of tags to be applied to the cluster. Tags must be in the form of `key:value`. - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only -- `cloud_config_id` (String) +- `cloud_config_id` (String, Deprecated) ID of the cloud config used for the cluster. This cloud config must be of type `azure`. - `id` (String) The ID of this resource. -- `kubeconfig` (String) +- `kubeconfig` (String) Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`. ### Nested Schema for `cloud_config` @@ -137,7 +137,7 @@ Optional: Required: -- `count` (Number) +- `count` (Number) Number of nodes in the machine pool. - `instance_type` (String) - `name` (String) @@ -145,20 +145,21 @@ Optional: - `additional_labels` (Map of String) - `azs` (Set of String) -- `control_plane` (Boolean) -- `control_plane_as_worker` (Boolean) +- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`. +- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`. 
+- `node_repave_interval` (Number) Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools. - `subnet_id` (String) - `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints)) -- `update_strategy` (String) +- `update_strategy` (String) Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`. ### Nested Schema for `machine_pool.taints` Required: -- `effect` (String) -- `key` (String) -- `value` (String) +- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`. +- `key` (String) The key of the taint. +- `value` (String) The value of the taint. @@ -167,51 +168,58 @@ Required: Required: -- `backup_location_id` (String) -- `expiry_in_hour` (Number) -- `prefix` (String) -- `schedule` (String) +- `backup_location_id` (String) The ID of the backup location to use for the backup. +- `expiry_in_hour` (Number) The number of hours after which the backup will be deleted. For example, if the expiry is set to 24, the backup will be deleted after 24 hours. +- `prefix` (String) Prefix for the backup name. The backup name will be of the format --. +- `schedule` (String) The schedule for the backup. The schedule is specified in cron format. For example, to run the backup every day at 1:00 AM, the schedule should be set to `0 1 * * *`. Optional: -- `include_cluster_resources` (Boolean) -- `include_disks` (Boolean) -- `namespaces` (Set of String) +- `cluster_uids` (Set of String) The list of cluster UIDs to include in the backup. If `include_all_clusters` is set to `true`, then all clusters will be included. +- `include_all_clusters` (Boolean) Whether to include all clusters in the backup. If set to false, only the clusters specified in `cluster_uids` will be included. +- `include_cluster_resources` (Boolean) Whether to include the cluster resources in the backup. If set to false, only the cluster configuration and disks will be backed up. +- `include_disks` (Boolean) Whether to include the disks in the backup. If set to false, only the cluster configuration will be backed up. +- `namespaces` (Set of String) The list of Kubernetes namespaces to include in the backup. If not specified, all namespaces will be included. ### Nested Schema for `cluster_profile` -Optional: +Required: -- `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) +- `id` (String) The ID of the cluster profile. -Read-Only: +Optional: -- `id` (String) The ID of this resource. +- `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) ### Nested Schema for `cluster_profile.pack` Required: -- `name` (String) -- `values` (String) +- `name` (String) The name of the pack. The name must be unique within the cluster profile. Optional: - `manifest` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack--manifest)) -- `registry_uid` (String) -- `tag` (String) -- `type` (String) +- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry. +- `tag` (String) The tag of the pack. The tag is the version of the pack. +- `type` (String) The type of the pack. The default value is `spectro`. +- `uid` (String) +- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format. 
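As a sketch of the `cluster_profile` and `pack` schemas above (the data source reference, pack name, and tag are placeholders; a pack override must match a pack that exists in the attached profile):

```terraform
cluster_profile {
  id = data.spectrocloud_cluster_profile.profile.id # placeholder profile reference
  pack {
    name   = "kubernetes" # must match a pack name in the profile
    tag    = "1.26.x"     # the pack version
    values = <<-EOT
      # YAML configuration values for the pack (placeholder)
    EOT
  }
}
```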
### Nested Schema for `cluster_profile.pack.manifest` Required: -- `content` (String) -- `name` (String) +- `content` (String) The content of the manifest. The content is the YAML content of the manifest. +- `name` (String) The name of the manifest. The name must be unique within the pack. + +Read-Only: + +- `uid` (String) @@ -221,12 +229,12 @@ Required: Required: -- `type` (String) +- `type` (String) The type of the RBAC binding. Can be one of the following values: `RoleBinding`, or `ClusterRoleBinding`. Optional: -- `namespace` (String) -- `role` (Map of String) +- `namespace` (String) The Kubernetes namespace of the RBAC binding. Required if 'type' is set to 'RoleBinding'. +- `role` (Map of String) The role of the RBAC binding. Required if 'type' is set to 'RoleBinding'. - `subjects` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding--subjects)) ### Nested Schema for `cluster_rbac_binding.subjects` Required: -- `name` (String) -- `type` (String) +- `name` (String) The name of the subject. Required if 'type' is set to 'User' or 'Group'. +- `type` (String) The type of the subject. Can be one of the following values: `User`, `Group`, or `ServiceAccount`. Optional: -- `namespace` (String) +- `namespace` (String) The Kubernetes namespace of the subject. Required if 'type' is set to 'ServiceAccount'. @@ -275,22 +283,12 @@ Optional: Required: -- `name` (String) -- `resource_allocation` (Map of String) - - - -### Nested Schema for `pack` - -Required: - -- `name` (String) -- `tag` (String) -- `values` (String) +- `name` (String) Name of the namespace. This is the name of the Kubernetes namespace in the cluster. +- `resource_allocation` (Map of String) Resource allocation for the namespace. This is a map containing the resource type and the resource value. For example, `{cpu_cores: '2', memory_MiB: '2048'}` Optional: -- `registry_uid` (String) +- `images_blacklist` (List of String) List of images to disallow for the namespace. For example, `['nginx:latest', 'redis:latest']` ### Nested Schema for `scan_policy` Required: -- `configuration_scan_schedule` (String) -- `conformance_scan_schedule` (String) -- `penetration_scan_schedule` (String) +- `configuration_scan_schedule` (String) The schedule for configuration scan. +- `conformance_scan_schedule` (String) The schedule for conformance scan. +- `penetration_scan_schedule` (String) The schedule for penetration scan. @@ -310,6 +308,4 @@ Optional: - `create` (String) - `delete` (String) -- `update` (String) - - +- `update` (String) \ No newline at end of file diff --git a/docs/resources/cluster_vsphere.md b/docs/resources/cluster_vsphere.md index 52e959f3..11de9c3c 100644 --- a/docs/resources/cluster_vsphere.md +++ b/docs/resources/cluster_vsphere.md @@ -1,13 +1,15 @@ --- -# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cluster_vsphere Resource - terraform-provider-spectrocloud" subcategory: "" description: |- - + A resource to manage a vSphere cluster in Palette. --- # spectrocloud_cluster_vsphere (Resource) + A resource to manage a vSphere cluster in Palette. + +## Example Usage @@ -17,51 +19,51 @@ description: |- ### Required -- `cloud_account_id` (String) +- `cloud_account_id` (String) ID of the cloud account to be used for the cluster. This cloud account must be of type `vsphere`.
- `cloud_config` (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--cloud_config)) - `machine_pool` (Block Set, Min: 1) (see [below for nested schema](#nestedblock--machine_pool)) -- `name` (String) +- `name` (String) The name of the cluster. ### Optional -- `apply_setting` (String) +- `apply_setting` (String) The setting to apply the cluster profile. `DownloadAndInstall` will download and install packs in one action. `DownloadAndInstallLater` will only download artifact and postpone install for later. Default value is `DownloadAndInstall`. - `backup_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--backup_policy)) - `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) -- `cluster_profile_id` (String, Deprecated) - `cluster_rbac_binding` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding)) +- `context` (String) - `host_config` (Block List) (see [below for nested schema](#nestedblock--host_config)) - `location_config` (Block List) (see [below for nested schema](#nestedblock--location_config)) - `namespaces` (Block List) (see [below for nested schema](#nestedblock--namespaces)) -- `os_patch_after` (String) -- `os_patch_on_boot` (Boolean) -- `os_patch_schedule` (String) -- `pack` (Block List) (see [below for nested schema](#nestedblock--pack)) +- `os_patch_after` (String) The date and time after which to patch the cluster. Prefix the time value with the respective RFC. Ex: `RFC3339: 2006-01-02T15:04:05Z07:00` +- `os_patch_on_boot` (Boolean) Whether to apply OS patch on boot. Default is `false`. +- `os_patch_schedule` (String) The cron schedule for OS patching. This must be in the form of cron syntax. Ex: `0 0 * * *`. - `scan_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy)) -- `skip_completion` (Boolean) -- `tags` (Set of String) +- `skip_completion` (Boolean) If `true`, the cluster will be created asynchronously. Default value is `false`. +- `tags` (Set of String) A list of tags to be applied to the cluster. Tags must be in the form of `key:value`. - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only -- `cloud_config_id` (String) +- `cloud_config_id` (String, Deprecated) ID of the cloud config used for the cluster. This cloud config must be of type `vsphere`. - `id` (String) The ID of this resource. -- `kubeconfig` (String) +- `kubeconfig` (String) Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`. ### Nested Schema for `cloud_config` Required: -- `datacenter` (String) -- `folder` (String) -- `ssh_key` (String) +- `datacenter` (String) The name of the datacenter in vSphere. This is the name of the datacenter as it appears in vSphere. +- `folder` (String) The name of the folder in vSphere. This is the name of the folder as it appears in vSphere. +- `ssh_key` (String) The SSH key to be used for the cluster. This is the public key that will be used to access the cluster. Optional: -- `image_template_folder` (String) -- `network_search_domain` (String) -- `network_type` (String) -- `static_ip` (Boolean) +- `image_template_folder` (String) The name of the image template folder in vSphere. This is the name of the folder as it appears in vSphere. +- `network_search_domain` (String) The search domain to use for the cluster in case of DHCP. +- `network_type` (String) The type of network to use for the cluster. This can be `VIP` or `DDNS`.
+- `ntp_servers` (Set of String) A list of NTP servers to be used by the cluster. +- `static_ip` (Boolean) Whether to use static IP addresses for the cluster. If `true`, the cluster will use static IP addresses. If `false`, the cluster will use DDNS. Default is `false`. @@ -69,27 +71,28 @@ Optional: Required: -- `count` (Number) +- `count` (Number) Number of nodes in the machine pool. - `instance_type` (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--machine_pool--instance_type)) -- `name` (String) +- `name` (String) The name of the machine pool. This is used to identify the machine pool in the cluster. - `placement` (Block List, Min: 1) (see [below for nested schema](#nestedblock--machine_pool--placement)) Optional: - `additional_labels` (Map of String) -- `control_plane` (Boolean) -- `control_plane_as_worker` (Boolean) +- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`. +- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`. +- `node_repave_interval` (Number) Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools. - `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints)) -- `update_strategy` (String) +- `update_strategy` (String) Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`. ### Nested Schema for `machine_pool.instance_type` Required: -- `cpu` (Number) -- `disk_size_gb` (Number) -- `memory_mb` (Number) +- `cpu` (Number) The number of CPUs. +- `disk_size_gb` (Number) The size of the disk in GB. +- `memory_mb` (Number) The amount of memory in MB. @@ -97,14 +100,14 @@ Required: Required: -- `cluster` (String) -- `datastore` (String) -- `network` (String) -- `resource_pool` (String) +- `cluster` (String) The name of the cluster to use for the machine pool. As it appears in the vSphere. +- `datastore` (String) The name of the datastore to use for the machine pool. As it appears in the vSphere. +- `network` (String) The name of the network to use for the machine pool. As it appears in the vSphere. +- `resource_pool` (String) The name of the resource pool to use for the machine pool. As it appears in the vSphere. Optional: -- `static_ip_pool_id` (String) +- `static_ip_pool_id` (String) The ID of the static IP pool to use for the machine pool in case of static cluster placement. Read-Only: @@ -116,9 +119,9 @@ Read-Only: Required: -- `effect` (String) -- `key` (String) -- `value` (String) +- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`. +- `key` (String) The key of the taint. +- `value` (String) The value of the taint. @@ -127,51 +130,58 @@ Required: Required: -- `backup_location_id` (String) -- `expiry_in_hour` (Number) -- `prefix` (String) -- `schedule` (String) +- `backup_location_id` (String) The ID of the backup location to use for the backup. +- `expiry_in_hour` (Number) The number of hours after which the backup will be deleted. For example, if the expiry is set to 24, the backup will be deleted after 24 hours. +- `prefix` (String) Prefix for the backup name. The backup name will be of the format --. +- `schedule` (String) The schedule for the backup. The schedule is specified in cron format. For example, to run the backup every day at 1:00 AM, the schedule should be set to `0 1 * * *`. 
Optional:

-- `include_cluster_resources` (Boolean)
-- `include_disks` (Boolean)
-- `namespaces` (Set of String)
+- `cluster_uids` (Set of String) The list of cluster UIDs to include in the backup. If `include_all_clusters` is set to `true`, then all clusters will be included.
+- `include_all_clusters` (Boolean) Whether to include all clusters in the backup. If set to false, only the clusters specified in `cluster_uids` will be included.
+- `include_cluster_resources` (Boolean) Whether to include the cluster resources in the backup. If set to false, only the cluster configuration and disks will be backed up.
+- `include_disks` (Boolean) Whether to include the disks in the backup. If set to false, only the cluster configuration will be backed up.
+- `namespaces` (Set of String) The list of Kubernetes namespaces to include in the backup. If not specified, all namespaces will be included.


### Nested Schema for `cluster_profile`

-Optional:
+Required:

-- `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack))
+- `id` (String) The ID of the cluster profile.

-Read-Only:
+Optional:

-- `id` (String) The ID of this resource.
+- `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack))


### Nested Schema for `cluster_profile.pack`

Required:

-- `name` (String)
-- `values` (String)
+- `name` (String) The name of the pack. The name must be unique within the cluster profile.

Optional:

- `manifest` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack--manifest))
-- `registry_uid` (String)
-- `tag` (String)
-- `type` (String)
+- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry.
+- `tag` (String) The tag of the pack. The tag is the version of the pack.
+- `type` (String) The type of the pack. The default value is `spectro`.
+- `uid` (String)
+- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format.


### Nested Schema for `cluster_profile.pack.manifest`

Required:

-- `content` (String)
-- `name` (String)
+- `content` (String) The content of the manifest. The content is the YAML content of the manifest.
+- `name` (String) The name of the manifest. The name must be unique within the pack.
+
+Read-Only:
+
+- `uid` (String)



@@ -181,12 +191,12 @@ Required:

Required:

-- `type` (String)
+- `type` (String) The type of the RBAC binding. Can be one of the following values: `RoleBinding` or `ClusterRoleBinding`.

Optional:

-- `namespace` (String)
-- `role` (Map of String)
+- `namespace` (String) The Kubernetes namespace of the RBAC binding. Required if 'type' is set to 'RoleBinding'.
+- `role` (Map of String) The role of the RBAC binding. Required if 'type' is set to 'RoleBinding'.
- `subjects` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding--subjects))

@@ -194,12 +204,12 @@ Optional:

Required:

-- `name` (String)
-- `type` (String)
+- `name` (String) The name of the subject. Required if 'type' is set to 'User' or 'Group'.
+- `type` (String) The type of the subject. Can be one of the following values: `User`, `Group`, or `ServiceAccount`.

Optional:

-- `namespace` (String)
+- `namespace` (String) The Kubernetes namespace of the subject. Required if 'type' is set to 'ServiceAccount'.
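For illustration, the `backup_policy` and `cluster_rbac_binding` schemas documented above compose into blocks like the minimal sketch below. Attribute names come from the schema; every value, including the `kind`/`name` keys assumed for the `role` map, is a hypothetical placeholder rather than provider output:

```
backup_policy {
  backup_location_id        = "example-backup-location-uid" # hypothetical UID
  prefix                    = "prod"
  schedule                  = "0 1 * * *" # cron syntax: daily at 1:00 AM
  expiry_in_hour            = 24          # each backup is deleted after 24 hours
  include_disks             = true
  include_cluster_resources = true
}

cluster_rbac_binding {
  type      = "RoleBinding"
  namespace = "team-a" # required when type is "RoleBinding"
  role = {
    kind = "ClusterRole" # assumed map keys
    name = "admin"
  }
  subjects {
    type      = "ServiceAccount"
    name      = "deployer"
    namespace = "team-a" # required when type is "ServiceAccount"
  }
}
```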
@@ -235,22 +245,12 @@ Optional:

Required:

-- `name` (String)
-- `resource_allocation` (Map of String)
-
-
-
-### Nested Schema for `pack`
-
-Required:
-
-- `name` (String)
-- `tag` (String)
-- `values` (String)
+- `name` (String) Name of the namespace. This is the name of the Kubernetes namespace in the cluster.
+- `resource_allocation` (Map of String) Resource allocation for the namespace. This is a map containing the resource type and the resource value. For example, `{cpu_cores: '2', memory_MiB: '2048'}`

Optional:

-- `registry_uid` (String)
+- `images_blacklist` (List of String) List of images to disallow for the namespace. For example, `['nginx:latest', 'redis:latest']`



@@ -258,9 +258,9 @@ Optional:

Required:

-- `configuration_scan_schedule` (String)
-- `conformance_scan_schedule` (String)
-- `penetration_scan_schedule` (String)
+- `configuration_scan_schedule` (String) The schedule for configuration scan.
+- `conformance_scan_schedule` (String) The schedule for conformance scan.
+- `penetration_scan_schedule` (String) The schedule for penetration scan.



@@ -270,6 +270,4 @@ Optional:

- `create` (String)
- `delete` (String)
-- `update` (String)
-
-
+- `update` (String)
\ No newline at end of file
diff --git a/examples/provider/provider.tf b/examples/provider/provider.tf
index b42b036c..34f00f68 100644
--- a/examples/provider/provider.tf
+++ b/examples/provider/provider.tf
@@ -9,7 +9,6 @@ terraform {

provider "spectrocloud" {
  host         = var.sc_host         # Spectro Cloud endpoint (defaults to api.spectrocloud.com)
-  username     = var.sc_username     # Username of the user (or specify with SPECTROCLOUD_USERNAME env var)
-  password     = var.sc_password     # Password (or specify with SPECTROCLOUD_PASSWORD env var)
+  api_key      = var.sc_api_key      # API key (or specify with SPECTROCLOUD_APIKEY env var)
  project_name = var.sc_project_name # Project name (e.g: Default)
}
diff --git a/examples/resources/spectrocloud_cluster_vsphere/resource.tf b/examples/resources/spectrocloud_cluster_vsphere/resource.tf
new file mode 100644
index 00000000..e632c875
--- /dev/null
+++ b/examples/resources/spectrocloud_cluster_vsphere/resource.tf
@@ -0,0 +1,63 @@
+data "spectrocloud_cluster_profile" "vmware_profile" {
+  name    = "vsphere-picard-2"
+  version = "1.0.0"
+  context = "tenant"
+}
+data "spectrocloud_cloudaccount_vsphere" "vmware_account" {
+  name = var.shared_vmware_cloud_account_name
+}
+
+
+resource "spectrocloud_cluster_vsphere" "cluster" {
+  name             = "vsphere-picard-3"
+  cloud_account_id = data.spectrocloud_cloudaccount_vsphere.vmware_account.id
+  cluster_profile {
+    id = data.spectrocloud_cluster_profile.vmware_profile.id
+  }
+  cloud_config {
+    ssh_key = var.cluster_ssh_public_key
+
+    datacenter = var.vsphere_datacenter
+    folder     = var.vsphere_folder
+    // For Dynamic DNS (network_type & network_search_domain values should be set for DDNS)
+    network_type          = "DDNS"
+    network_search_domain = var.cluster_network_search
+    // For static placement (static_ip defaults to false; set it to true for static provisioning.
Not required to specify network_type & network_search_domain) + # static_ip = true + } + + machine_pool { + control_plane = true + control_plane_as_worker = true + name = "master-pool" + count = 1 + placement { + cluster = var.vsphere_cluster + resource_pool = var.vsphere_resource_pool + datastore = var.vsphere_datastore + network = var.vsphere_network + } + instance_type { + disk_size_gb = 40 + memory_mb = 4096 + cpu = 2 + } + } + + machine_pool { + name = "worker-basic" + count = 1 + node_repave_interval = 30 + placement { + cluster = var.vsphere_cluster + resource_pool = var.vsphere_resource_pool + datastore = var.vsphere_datastore + network = var.vsphere_network + } + instance_type { + disk_size_gb = 40 + memory_mb = 8192 + cpu = 4 + } + } +} \ No newline at end of file diff --git a/go.sum b/go.sum deleted file mode 100644 index 97c52512..00000000 --- a/go.sum +++ /dev/null @@ -1,604 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.2.0/go.mod h1:tWhwTbUTndesPNeF0C900vKoq283u6zp4APT9vaF3SI= -github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= -github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/anmitsu/go-shlex 
v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= 
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-errors/errors v1.4.0 h1:2OA7MFw38+e9na72T1xgkomPb6GzZzzxvJ5U630FoRM= -github.com/go-errors/errors v1.4.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= -github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= -github.com/go-openapi/analysis v0.20.0 h1:UN09o0kNhleunxW7LR+KnltD0YrJ8FF03pSqvAN3Vro= -github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9 h1:9SnKdGhiPZHF3ttwFMiCBEb8jQ4IDdrK+5+a0oTygA4= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 
h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= -github.com/go-openapi/loads v0.20.2 h1:z5p5Xf5wujMxS1y8aP+vxwW5qYT2zdJBbXKmQUG3lcc= -github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= -github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= -github.com/go-openapi/runtime v0.19.28 h1:9lYu6axek8LJrVkMVViVirRcpoaCxXX7+sSvmizGVnA= -github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= -github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= -github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/strfmt 
v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.1 h1:1VgxvehFne1mbChGeCmZ5pc0LxUf6yaACVSIYAR91Xc= -github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= -github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= -github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= -github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= -github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts= -github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod 
h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= -github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= -github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= -github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.6 h1:MDV3UrKQBM3du3G7MApDGvOsMYy3JQJ4exhSoKBAeVA= -github.com/hashicorp/go-plugin v1.4.6/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= 
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= -github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= -github.com/hashicorp/hcl/v2 v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8= -github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= -github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= -github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= -github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= -github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= -github.com/hashicorp/terraform-plugin-docs v0.13.0 h1:6e+VIWsVGb6jYJewfzq2ok2smPzZrt1Wlm9koLeKazY= -github.com/hashicorp/terraform-plugin-docs v0.13.0/go.mod h1:W0oCmHAjIlTHBbvtppWHe8fLfZ2BznQbuv8+UD8OucQ= -github.com/hashicorp/terraform-plugin-go v0.14.1 h1:cwZzPYla82XwAqpLhSzdVsOMU+6H29tczAwrB0z9Zek= -github.com/hashicorp/terraform-plugin-go v0.14.1/go.mod h1:Bc/K6K26BQ2FHqIELPbpKtt2CzzbQou+0UQF3/0NsCQ= -github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= -github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 h1:zHcMbxY0+rFO9gY99elV/XC/UnQVg7FhRCbj1i5b7vM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1/go.mod h1:+tNlb0wkfdsDJ7JEiERLz4HzM19HyiuIoGzTsM7rPpw= -github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c h1:D8aRO6+mTqHfLsK/BC3j5OAoogv1WLRWzY1AaTo3rBg= -github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= -github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= -github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= 
-github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/matryer/is v1.2.0/go.mod 
h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mitchellh/cli v1.1.4 h1:qj8czE26AU4PbiaPXK5uVmMSM+V5BYsFBiM9HhGRLUA= -github.com/mitchellh/cli v1.1.4/go.mod h1:vTLESy5mRhKOs9KDp0/RATawxP1UqBmdrpVRMnpcvKQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= 
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= -github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= -github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spectrocloud/gomi v1.14.1-0.20230412095143-b0595c6c6f08 h1:AnOC0U+ExlKBeT5yF2Pg8PPfVOfxwOUBS/5deOl1Q4Y= -github.com/spectrocloud/gomi v1.14.1-0.20230412095143-b0595c6c6f08/go.mod h1:UnhUDpFEvtYh6m384r3xzj8/+Z6/hMp2O8whEMYVHec= -github.com/spectrocloud/hapi v1.14.1-0.20230521133257-d73b9e4aea65 h1:mz46BzwOQg49P+CEGeMQgD7W6u3s0DmixMT4mudNQeA= -github.com/spectrocloud/hapi v1.14.1-0.20230521133257-d73b9e4aea65/go.mod h1:9lX5c6bShSkAg24223A7XBCyJj4/Kr9w0YFv6Mf5ZlE= 
-github.com/spectrocloud/hapi v1.14.1-0.20230814141242-394093e7fedb h1:Y/TDXfEZ6GJbYi4bKxFS0HFvmOL6Xs653BoxuZjuKEQ= -github.com/spectrocloud/hapi v1.14.1-0.20230814141242-394093e7fedb/go.mod h1:O/Bkbw92QPSGPNQPqKt7Qlkn+9BKK/a22KTUlk76KHI= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= -github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.2.0/go.mod 
h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI= -go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text 
v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200711021454-869866162049 h1:YFTFpQhgvrLrmxtiIncJxFXeCyq84ixuKWVCaCAi9Oc= -google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/spectrocloud/cluster_common_hash.go b/spectrocloud/cluster_common_hash.go index b04a092b..050ed030 100644 --- a/spectrocloud/cluster_common_hash.go +++ b/spectrocloud/cluster_common_hash.go @@ -5,102 +5,122 @@ import ( "fmt" "hash/fnv" "sort" + "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func resourceMachinePoolAzureHash(v interface{}) int { +func CommonHash(nodePool map[string]interface{}) *bytes.Buffer { var buf 
bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(HashStringMap(m["additional_labels"])) - buf.WriteString(HashStringMapList(m["taints"])) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool))) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) + if _, ok := nodePool["additional_labels"]; ok { + buf.WriteString(HashStringMap(nodePool["additional_labels"])) + } + if _, ok := nodePool["taints"]; ok { + buf.WriteString(HashStringMapList(nodePool["taints"])) + } + if val, ok := nodePool["control_plane"]; ok { + buf.WriteString(fmt.Sprintf("%t-", val.(bool))) + } + if val, ok := nodePool["control_plane_as_worker"]; ok { + buf.WriteString(fmt.Sprintf("%t-", val.(bool))) + } + if val, ok := nodePool["name"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) + } + if val, ok := nodePool["count"]; ok { + buf.WriteString(fmt.Sprintf("%d-", val.(int))) + } + if val, ok := nodePool["update_strategy"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) + } + if val, ok := nodePool["node_repave_interval"]; ok { + buf.WriteString(fmt.Sprintf("%d-", val.(int))) + } + /*if val, ok := nodePool["instance_type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) + } + if val, ok := nodePool["azs"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) + }*/ + if val, ok := nodePool["min"]; ok { + buf.WriteString(fmt.Sprintf("%d-", val.(int))) + } + if val, ok := nodePool["max"]; ok { + buf.WriteString(fmt.Sprintf("%d-", val.(int))) + } - buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string))) - buf.WriteString(fmt.Sprintf("%t-", m["is_system_node_pool"].(bool))) + return &buf +} - buf.WriteString(fmt.Sprintf("%s-", m["azs"].(*schema.Set).GoString())) +func resourceMachinePoolAzureHash(v interface{}) int { + m := v.(map[string]interface{}) + buf := CommonHash(m) - if m["os_type"] != "" { - buf.WriteString(fmt.Sprintf("%s-", m["os_type"].(string))) + if val, ok := m["instance_type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) + } + if val, ok := m["is_system_node_pool"]; ok { + buf.WriteString(fmt.Sprintf("%t-", val.(bool))) + } + if val, ok := m["os_type"]; ok && val != "" { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) } - - // TODO(saamalik) fix for disk - //buf.WriteString(fmt.Sprintf("%d-", d["size_gb"].(int))) - //buf.WriteString(fmt.Sprintf("%s-", d["type"].(string))) - - //d2 := m["disk"].([]interface{}) - //d := d2[0].(map[string]interface{}) return int(hash(buf.String())) } func resourceMachinePoolAksHash(v interface{}) int { - var buf bytes.Buffer m := v.(map[string]interface{}) + buf := CommonHash(m) - buf.WriteString(HashStringMap(m["additional_labels"])) - buf.WriteString(HashStringMapList(m["taints"])) - - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) - - if m["min"] != nil { - buf.WriteString(fmt.Sprintf("%d-", m["min"].(int))) + if val, ok := m["instance_type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", val.(string))) } - if m["max"] != nil { - buf.WriteString(fmt.Sprintf("%d-", m["max"].(int))) + if val, ok := m["disk_size_gb"]; ok { + buf.WriteString(fmt.Sprintf("%d-", val.(int))) + } + if val, ok := m["is_system_node_pool"]; ok { + buf.WriteString(fmt.Sprintf("%t-", val.(bool))) 
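
For reviewers, a quick illustration of what the CommonHash refactor does: every pool hasher now funnels its shared fields into one buffer, and that buffer is digested into the Terraform set key. This is a minimal, self-contained sketch of the mechanism, assuming the provider's unexported hash helper simply wraps fnv.New32a (consistent with the empty-buffer expectation 2166136261, the FNV-32a offset basis, in the new tests below):

package main

import (
	"bytes"
	"fmt"
	"hash/fnv"
)

// fnvHash stands in for the provider's unexported hash helper (assumed to wrap fnv.New32a).
func fnvHash(s string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(s))
	return h.Sum32()
}

// poolKey mimics CommonHash: concatenate the fields that define pool identity, then hash.
func poolKey(name string, count, repaveInterval int) uint32 {
	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("%s-", name))
	buf.WriteString(fmt.Sprintf("%d-", count))
	buf.WriteString(fmt.Sprintf("%d-", repaveInterval))
	return fnvHash(buf.String())
}

func main() {
	// Changing any hashed field (here: count) yields a new set key,
	// which is how Terraform detects that a machine pool changed.
	fmt.Println(poolKey("worker", 3, 10)) // one key
	fmt.Println(poolKey("worker", 4, 10)) // a different key
	fmt.Println(fnvHash(""))              // 2166136261, the FNV-32a offset basis
}
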
+	}
+	if val, ok := m["storage_account_type"]; ok {
+		buf.WriteString(fmt.Sprintf("%s-", val.(string)))
+	}
 
 	return int(hash(buf.String()))
 }
 
 func resourceMachinePoolGcpHash(v interface{}) int {
-	var buf bytes.Buffer
 	m := v.(map[string]interface{})
-
-	buf.WriteString(HashStringMap(m["additional_labels"]))
-	buf.WriteString(HashStringMapList(m["taints"]))
-
-	buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool)))
-	buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool)))
-	buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
-	buf.WriteString(fmt.Sprintf("%d-", m["count"].(int)))
-	buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string)))
-
-	buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string)))
-	buf.WriteString(fmt.Sprintf("%s-", m["azs"].(*schema.Set).GoString()))
+	buf := CommonHash(m)
 
 	return int(hash(buf.String()))
 }
 
 func resourceMachinePoolAwsHash(v interface{}) int {
-	var buf bytes.Buffer
 	m := v.(map[string]interface{})
+	buf := CommonHash(m)
 
-	buf.WriteString(HashStringMap(m["additional_labels"]))
-	buf.WriteString(HashStringMapList(m["taints"]))
-
-	buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool)))
-	buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool)))
-	buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
-	buf.WriteString(fmt.Sprintf("%d-", m["count"].(int)))
-	buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string)))
-
+	if m["min"] != nil {
+		buf.WriteString(fmt.Sprintf("%d-", m["min"].(int)))
+	}
+	if m["max"] != nil {
+		buf.WriteString(fmt.Sprintf("%d-", m["max"].(int)))
+	}
 	buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string)))
 	buf.WriteString(fmt.Sprintf("%s-", m["capacity_type"].(string)))
 	buf.WriteString(fmt.Sprintf("%s-", m["max_price"].(string)))
+	if m["azs"] != nil {
+		azsSet := m["azs"].(*schema.Set)
+		azsList := azsSet.List()
+		azsListStr := make([]string, len(azsList))
+		for i, v := range azsList {
+			azsListStr[i] = v.(string)
+		}
+		sort.Strings(azsListStr)
+		azsStr := strings.Join(azsListStr, "-")
+		buf.WriteString(fmt.Sprintf("%s-", azsStr))
+	}
-	buf.WriteString(fmt.Sprintf("%s-", m["azs"].(*schema.Set).GoString()))
 	buf.WriteString(HashStringMap(m["az_subnets"]))
@@ -108,17 +128,10 @@ func resourceMachinePoolAwsHash(v interface{}) int {
 }
 
 func resourceMachinePoolEksHash(v interface{}) int {
-	var buf bytes.Buffer
 	m := 
v.(map[string]interface{}) + if len(v.([]interface{})) > 0 { + m := v.([]interface{})[0].(map[string]interface{}) - buf.WriteString(HashStringMap(m["additional_labels"])) - buf.WriteString(HashStringMapList(m["taints"])) + if m["ami_id"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["ami_id"].(string))) + } + if m["root_volume_type"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["root_volume_type"].(string))) + } + if m["root_volume_iops"] != nil { + buf.WriteString(fmt.Sprintf("%d-", m["root_volume_iops"].(int))) + } + if m["root_volume_throughput"] != nil { + buf.WriteString(fmt.Sprintf("%d-", m["root_volume_throughput"].(int))) + } + if m["additional_security_groups"] != nil { + for _, sg := range m["additional_security_groups"].(*schema.Set).List() { + buf.WriteString(fmt.Sprintf("%s-", sg.(string))) + } + } + } - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["disk_size_gb"].(int))) - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) + return buf.String() +} - if m["min"] != nil { - buf.WriteString(fmt.Sprintf("%d-", m["min"].(int))) - } - if m["max"] != nil { - buf.WriteString(fmt.Sprintf("%d-", m["max"].(int))) - } - buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["capacity_type"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["max_price"].(string))) +func resourceMachinePoolCoxEdgeHash(v interface{}) int { + m := v.(map[string]interface{}) + buf := CommonHash(m) + + return int(hash(buf.String())) +} + +func resourceMachinePoolTkeHash(v interface{}) int { + m := v.(map[string]interface{}) + buf := CommonHash(m) for i, j := range m["az_subnets"].(map[string]interface{}) { buf.WriteString(fmt.Sprintf("%s-%s", i, j.(string))) @@ -166,18 +199,8 @@ func resourceMachinePoolTkeHash(v interface{}) int { } func resourceMachinePoolVsphereHash(v interface{}) int { - var buf bytes.Buffer m := v.(map[string]interface{}) - - buf.WriteString(HashStringMap(m["additional_labels"])) - buf.WriteString(HashStringMapList(m["taints"])) - - //d := m["disk"].([]interface{})[0].(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool))) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) + buf := CommonHash(m) if v, found := m["instance_type"]; found { if len(v.([]interface{})) > 0 { @@ -188,21 +211,22 @@ func resourceMachinePoolVsphereHash(v interface{}) int { } } + if placements, found := m["placement"]; found { + for _, p := range placements.([]interface{}) { + place := p.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", place["cluster"].(string))) + buf.WriteString(fmt.Sprintf("%s-", place["resource_pool"].(string))) + buf.WriteString(fmt.Sprintf("%s-", place["datastore"].(string))) + buf.WriteString(fmt.Sprintf("%s-", place["network"].(string))) + buf.WriteString(fmt.Sprintf("%s-", place["static_ip_pool_id"].(string))) + } + } return int(hash(buf.String())) } func resourceMachinePoolOpenStackHash(v interface{}) int { - var buf bytes.Buffer m := v.(map[string]interface{}) - - buf.WriteString(HashStringMap(m["additional_labels"])) - buf.WriteString(HashStringMapList(m["taints"])) - - buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool))) - 
buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) + buf := CommonHash(m) buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["subnet_id"].(string))) @@ -213,33 +237,15 @@ func resourceMachinePoolOpenStackHash(v interface{}) int { } func resourceMachinePoolVirtualHash(v interface{}) int { - var buf bytes.Buffer m := v.(map[string]interface{}) - - buf.WriteString(HashStringMap(m["additional_labels"])) - buf.WriteString(HashStringMapList(m["taints"])) - - buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool))) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) + buf := CommonHash(m) return int(hash(buf.String())) } func resourceMachinePoolMaasHash(v interface{}) int { - var buf bytes.Buffer m := v.(map[string]interface{}) - - buf.WriteString(HashStringMap(m["additional_labels"])) - buf.WriteString(HashStringMapList(m["taints"])) - - buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool))) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) + buf := CommonHash(m) if v, found := m["instance_type"]; found { if len(v.([]interface{})) > 0 { @@ -254,18 +260,8 @@ func resourceMachinePoolMaasHash(v interface{}) int { } func resourceMachinePoolLibvirtHash(v interface{}) int { - var buf bytes.Buffer m := v.(map[string]interface{}) - - buf.WriteString(HashStringMap(m["additional_labels"])) - buf.WriteString(HashStringMapList(m["taints"])) - - //d := m["disk"].([]interface{})[0].(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool))) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) + buf := CommonHash(m) if v, found := m["xsl_template"]; found { buf.WriteString(fmt.Sprintf("%s-", v.(string))) @@ -274,73 +270,61 @@ func resourceMachinePoolLibvirtHash(v interface{}) int { if v, found := m["instance_type"]; found { if len(v.([]interface{})) > 0 { ins := v.([]interface{})[0].(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%d-", ins["cpu"].(int))) - buf.WriteString(fmt.Sprintf("%d-", ins["disk_size_gb"].(int))) - buf.WriteString(fmt.Sprintf("%d-", ins["memory_mb"].(int))) - buf.WriteString(fmt.Sprintf("%s-", ins["cpus_sets"].(string))) - if ins["cache_passthrough"] != nil { - buf.WriteString(fmt.Sprintf("%s-%t", "cache_passthrough", ins["cache_passthrough"].(bool))) - } - if ins["gpu_config"] != nil { - config, _ := ins["gpu_config"].(map[string]interface{}) - if config != nil { - buf.WriteString(fmt.Sprintf("%d-", config["num_gpus"].(int))) - buf.WriteString(fmt.Sprintf("%s-", config["device_model"].(string))) - buf.WriteString(fmt.Sprintf("%s-", config["vendor"].(string))) - buf.WriteString(HashStringMap(config["addresses"])) - } - } - - if ins["attached_disks"] != nil { - for _, disk := 
range ins["attached_disks"].([]interface{}) { - diskMap := disk.(map[string]interface{}) - if diskMap["managed"] != nil { - buf.WriteString(fmt.Sprintf("%s-%t", "managed", diskMap["managed"].(bool))) - } - if diskMap["size_in_gb"] != nil { - buf.WriteString(fmt.Sprintf("%s-%d", "size_in_gb", diskMap["size_in_gb"].(int))) - } - } - } + buf.WriteString(InstanceTypeHash(ins)) } } return int(hash(buf.String())) } -func resourceMachinePoolEdgeNativeHash(v interface{}) int { +func InstanceTypeHash(ins map[string]interface{}) string { var buf bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(HashStringMap(m["additional_labels"])) - buf.WriteString(HashStringMapList(m["taints"])) - - buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool))) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) - - if _, found := m["host_uids"]; found { - for _, host := range m["host_uids"].([]interface{}) { - buf.WriteString(fmt.Sprintf("%s-", host.(string))) + buf.WriteString(fmt.Sprintf("%d-", ins["cpu"].(int))) + buf.WriteString(fmt.Sprintf("%d-", ins["disk_size_gb"].(int))) + buf.WriteString(fmt.Sprintf("%d-", ins["memory_mb"].(int))) + buf.WriteString(fmt.Sprintf("%s-", ins["cpus_sets"].(string))) + if ins["cache_passthrough"] != nil { + buf.WriteString(fmt.Sprintf("%s-%t", "cache_passthrough", ins["cache_passthrough"].(bool))) + } + if ins["gpu_config"] != nil { + config, _ := ins["gpu_config"].(map[string]interface{}) + if config != nil { + buf.WriteString(GpuConfigHash(config)) } } - return int(hash(buf.String())) + if ins["attached_disks"] != nil { + for _, disk := range ins["attached_disks"].([]interface{}) { + diskMap := disk.(map[string]interface{}) + if diskMap["managed"] != nil { + buf.WriteString(fmt.Sprintf("%s-%t", "managed", diskMap["managed"].(bool))) + } + if diskMap["size_in_gb"] != nil { + buf.WriteString(fmt.Sprintf("%s-%d", "size_in_gb", diskMap["size_in_gb"].(int))) + } + } + } + return buf.String() } -func resourceMachinePoolEdgeHash(v interface{}) int { +func GpuConfigHash(config map[string]interface{}) string { var buf bytes.Buffer - m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", config["num_gpus"].(int))) + buf.WriteString(fmt.Sprintf("%s-", config["device_model"].(string))) + buf.WriteString(fmt.Sprintf("%s-", config["vendor"].(string))) + buf.WriteString(HashStringMap(config["addresses"])) + return buf.String() +} - buf.WriteString(HashStringMap(m["additional_labels"])) - buf.WriteString(HashStringMapList(m["taints"])) +func resourceMachinePoolEdgeNativeHash(v interface{}) int { + m := v.(map[string]interface{}) + buf := CommonHash(m) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool))) - buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) + if _, found := m["host_uids"]; found { + for _, host := range m["host_uids"].([]interface{}) { + buf.WriteString(fmt.Sprintf("%s-", host.(string))) + } + } return int(hash(buf.String())) } @@ -368,7 +352,7 @@ func HashStringMapList(v interface{}) string { hashes = append(hashes, HashStringMap(i)) } - sortedHashes := make([]string, len(hashes), len(hashes)) + sortedHashes := make([]string, len(hashes)) copy(sortedHashes, hashes) 
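
For context, the reason both HashStringMap and HashStringMapList copy and sort before concatenating: Go randomizes map iteration order, so without sorting, the same labels could hash to a different set key on every plan and produce spurious diffs. A small self-contained sketch of the sorted-key technique (an illustrative stand-in, not the provider's actual function):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// hashStringMap emits "k-v" pairs in sorted key order, making the result
// independent of Go's randomized map iteration order.
func hashStringMap(m map[string]interface{}) string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var b strings.Builder
	for _, k := range keys {
		b.WriteString(fmt.Sprintf("%s-%s", k, m[k].(string)))
	}
	return b.String()
}

func main() {
	labels := map[string]interface{}{"team": "infra", "env": "prod"}
	// Prints "env-prodteam-infra" on every run, regardless of insertion order.
	fmt.Println(hashStringMap(labels))
}
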
sort.Strings(sortedHashes) @@ -392,7 +376,7 @@ func HashStringMap(v interface{}) string { keys = append(keys, k) } - sortedKeys := make([]string, len(keys), len(keys)) + sortedKeys := make([]string, len(keys)) copy(sortedKeys, keys) sort.Strings(sortedKeys) diff --git a/spectrocloud/cluster_common_hash_test.go b/spectrocloud/cluster_common_hash_test.go new file mode 100644 index 00000000..fe50da64 --- /dev/null +++ b/spectrocloud/cluster_common_hash_test.go @@ -0,0 +1,651 @@ +package spectrocloud + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "testing" + + "github.com/stretchr/testify/assert" +) + +func commonNodePool() map[string]interface{} { + nodePool := map[string]interface{}{ + "additional_labels": map[string]interface{}{ + "label1": "value1", + }, + "taints": []interface{}{ + map[string]interface{}{ + "key": "taint1", + "value": "true", + "effect": "NoSchedule", + }, + }, + "control_plane": true, + "control_plane_as_worker": false, + "name": "test-pool", + "count": 3, + "update_strategy": "RollingUpdate", + "node_repave_interval": 10, + } + return nodePool +} + +func TestCommonHash(t *testing.T) { + + expectedHash := "label1-value1effect-NoSchedulekey-taint1value-truetrue-false-test-pool-3-RollingUpdate-10-" + hash := CommonHash(commonNodePool()).String() + + assert.Equal(t, expectedHash, hash) +} + +func TestResourceMachinePoolAzureHash(t *testing.T) { + nodePool := map[string]interface{}{ + "additional_labels": map[string]interface{}{ + "label1": "value1", + }, + "taints": []interface{}{ + map[string]interface{}{ + "key": "taint1", + "value": "true", + "effect": "NoSchedule", + }, + }, + "control_plane": true, + "control_plane_as_worker": false, + "name": "test-pool", + "count": 3, + "update_strategy": "RollingUpdate", + "node_repave_interval": 10, + "instance_type": "Standard_D2_v3", + "is_system_node_pool": true, + "os_type": "Linux", + } + + expectedHash := 3495386805 + + hash := resourceMachinePoolAzureHash(nodePool) + + assert.Equal(t, expectedHash, hash) +} + +func TestResourceClusterHash(t *testing.T) { + clusterData := map[string]interface{}{ + "uid": "abc123", + } + + expectedHash := 1764273400 + + hash := resourceClusterHash(clusterData) + + assert.Equal(t, expectedHash, hash) +} + +func TestHashStringMapList(t *testing.T) { + stringMapList := []interface{}{ + map[string]interface{}{"key1": "value1", "key2": "value2"}, + map[string]interface{}{"key3": "value3"}, + } + + expectedHash := "key1-value1key2-value2key3-value3" + hash := HashStringMapList(stringMapList) + + assert.Equal(t, expectedHash, hash) +} + +func TestHashStringMapListlength(t *testing.T) { + stringMapList := []interface{}{} + + expectedHash := "" + hash := HashStringMapList(stringMapList) + + assert.Equal(t, expectedHash, hash) +} + +func TestResourceMachinePoolAksHash(t *testing.T) { + testCases := []struct { + name string + input interface{} + expected int + }{ + { + name: "Test Valid ResourceMachinePoolAksHash", + input: map[string]interface{}{ + "instance_type": "Standard_D2s_v3", + "disk_size_gb": 100, + "is_system_node_pool": true, + "storage_account_type": "Premium_LRS", + }, + expected: 380130606, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := resourceMachinePoolAksHash(tc.input) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestResourceMachinePoolGcpHash(t *testing.T) { + testCases := []struct { + input interface{} + expected int + }{ + { + input: map[string]interface{}{ + "instance_type": 
"n1-standard-4", + "min": 1, + "max": 3, + "capacity_type": "ON_DEMAND", + "max_price": "0.12", + "azs": []string{"us-central1-a", "us-central1-b"}, + "az_subnets": map[string]interface{}{ + "us-central1-a": "subnet-1", + "us-central1-b": "subnet-2", + }, + }, + expected: 1198721703, + }, + } + for _, tc := range testCases { + actual := resourceMachinePoolGcpHash(tc.input) + if actual != tc.expected { + t.Errorf("Expected hash %d, but got %d for input %+v", tc.expected, actual, tc.input) + } + } +} + +func TestResourceMachinePoolAwsHash(t *testing.T) { + testCases := []struct { + input interface{} + expected int + }{ + { + input: map[string]interface{}{ + "min": 1, + "max": 5, + "instance_type": "t2.micro", + "capacity_type": "ON_DEMAND", + "max_price": "0.03", + "azs": schema.NewSet(schema.HashString, []interface{}{ + "us-east-1a", + "us-east-1b", + }), + + "az_subnets": map[string]interface{}{ + "us-east-1a": "subnet-1", + "us-east-1b": "subnet-2", + }, + }, + expected: 1929542909, + }, + } + + for _, tc := range testCases { + actual := resourceMachinePoolAwsHash(tc.input) + if actual != tc.expected { + t.Errorf("Expected hash %d, but got %d for input %+v", tc.expected, actual, tc.input) + } + } +} + +func TestResourceMachinePoolEksHash(t *testing.T) { + + testCases := []struct { + input interface{} + expected int + }{ + { + input: map[string]interface{}{ + "disk_size_gb": 100, + "min": 2, + "max": 5, + "instance_type": "t2.micro", + "capacity_type": "on-demand", + "max_price": "0.05", + "az_subnets": map[string]interface{}{ + "subnet1": "subnet-123", + "subnet2": "subnet-456", + }, + "eks_launch_template": []interface{}{ + map[string]interface{}{ + "ami_id": "ami-123", + "root_volume_type": "gp2", + }, + }, + }, + expected: 456946481, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Input: %v", tc.input), func(t *testing.T) { + // Call the function with the test input + result := resourceMachinePoolEksHash(tc.input) + + // Check if the result matches the expected output + if result != tc.expected { + t.Errorf("Expected: %d, Got: %d", tc.expected, result) + } + }) + } +} + +func TestEksLaunchTemplate(t *testing.T) { + + testCases := []struct { + input interface{} + expected string + }{ + { + + input: []interface{}{ + map[string]interface{}{ + "ami_id": "ami-123", + "root_volume_type": "gp2", + "root_volume_iops": 100, + "root_volume_throughput": 200, + "additional_security_groups": schema.NewSet(schema.HashString, []interface{}{"sg-123", "sg-456"}), + }, + }, + expected: "ami-123-gp2-100-200-sg-456-sg-123-", + }, + { + // Test case with invalid input type (slice of non-map) + input: []interface{}{}, + expected: "", + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Input: %v", tc.input), func(t *testing.T) { + // Call the function with the test input + result := eksLaunchTemplate(tc.input) + + // Check if the result matches the expected output + if result != tc.expected { + t.Errorf("Expected: %s, Got: %s", tc.expected, result) + } + }) + } +} + +func TestResourceMachinePoolCoxEdgeHash(t *testing.T) { + + testCases := []struct { + input map[string]interface{} + expected int + }{ + { + + input: commonNodePool(), + expected: 513591628, + }, + { + // Test case with empty input + input: nil, + expected: 2166136261, + }, + } + + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + // Call the function with the test input + result := resourceMachinePoolCoxEdgeHash(tc.input) + + // Check if the result matches the expected output + if result != 
tc.expected { + t.Errorf("Expected: %d, Got: %d", tc.expected, result) + } + }) + } +} + +func TestResourceMachinePoolTkeHash(t *testing.T) { + testCases := []struct { + input map[string]interface{} + expected int + }{ + { + + input: map[string]interface{}{ + "az_subnets": map[string]interface{}{ + "subnet1": "10.0.0.1", + "subnet2": "10.0.0.2", + }, + }, + expected: 3634270287, + }, + } + + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + // Call the function with the test input + result := resourceMachinePoolTkeHash(tc.input) + + // Check if the result matches the expected output + if result != tc.expected { + t.Errorf("Expected: %d, Got: %d", tc.expected, result) + } + }) + } +} + +func TestResourceMachinePoolVsphereHash(t *testing.T) { + + testCases := []struct { + input interface{} + expected int + }{ + { + input: map[string]interface{}{ + "instance_type": []interface{}{ + map[string]interface{}{ + "cpu": 2, + "disk_size_gb": 50, + "memory_mb": 4096, + }, + }, + "placement": []interface{}{ + map[string]interface{}{ + "cluster": "cluster1", + "resource_pool": "resource_pool1", + "datastore": "datastore1", + "network": "network1", + "static_ip_pool_id": "static_pool1", + }, + }, + }, + expected: 556255137, + }, + { + // Test case with missing instance_type + input: map[string]interface{}{ + "placement": []interface{}{ + map[string]interface{}{ + "cluster": "cluster2", + "resource_pool": "resource_pool2", + "datastore": "datastore2", + "network": "network2", + "static_ip_pool_id": "static_pool2", + }, + }, + }, + expected: 3826670463, + }, + } + + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + // Call the function with the test input + result := resourceMachinePoolVsphereHash(tc.input) + + // Check if the result matches the expected output + if result != tc.expected { + t.Errorf("Expected: %d, Got: %d", tc.expected, result) + } + }) + } +} + +func TestResourceMachinePoolEdgeNativeHash(t *testing.T) { + + testCases := []struct { + input interface{} + expected int + }{ + { + input: map[string]interface{}{ + "host_uids": []interface{}{"host1", "host2", "host3"}, + }, + expected: 456992116, + }, + { + input: map[string]interface{}{}, + expected: 2166136261, + }, + } + + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + result := resourceMachinePoolEdgeNativeHash(tc.input) + + if result != tc.expected { + t.Errorf("Expected: %d, Got: %d", tc.expected, result) + } + }) + } +} + +func TestGpuConfigHash(t *testing.T) { + + testCases := []struct { + input map[string]interface{} + expected string + }{ + { + + input: map[string]interface{}{ + "num_gpus": 2, + "device_model": "model1", + "vendor": "vendor1", + "addresses": map[string]interface{}{ + "address1": "value1", + "address2": "value2", + }, + }, + expected: "2-model1-vendor1-address1-value1address2-value2", + }, + { + // Test case with missing "addresses" key + input: map[string]interface{}{ + "num_gpus": 1, + "device_model": "model2", + "vendor": "vendor2", + }, + expected: "1-model2-vendor2-", + }, + } + + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + result := GpuConfigHash(tc.input) + + if result != tc.expected { + t.Errorf("Expected: %s, Got: %s", tc.expected, result) + } + }) + } +} + +func TestInstanceTypeHash(t *testing.T) { + testCases := []struct { + name string + input map[string]interface{} + expectedHash string + }{ + { + name: "Valid InstanceTypeHash", + input: map[string]interface{}{ + "cpu": 4, + "disk_size_gb": 100, + "memory_mb": 8192, + "cpus_sets": 
"0-3", + "cache_passthrough": true, + "gpu_config": map[string]interface{}{ + "num_gpus": 2, + "device_model": "Tesla T4", + "vendor": "NVIDIA", + "addresses": map[string]interface{}{ + "gpu-address-1": "10.0.0.1", + "gpu-address-2": "10.0.0.2", + }, + }, + "attached_disks": []interface{}{ + map[string]interface{}{ + "managed": true, + "size_in_gb": 500, + }, + }, + }, + expectedHash: "4-100-8192-0-3-cache_passthrough-true2-Tesla T4-NVIDIA-gpu-address-1-10.0.0.1gpu-address-2-10.0.0.2managed-truesize_in_gb-500", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + hash := InstanceTypeHash(tc.input) + assert.Equal(t, tc.expectedHash, hash) + }) + } +} + +func TestResourceMachinePoolLibvirtHash(t *testing.T) { + testCases := []struct { + name string + input interface{} + expectedHash int + }{ + { + name: "Valid MachinePoolLibvirtHash", + input: map[string]interface{}{ + "xsl_template": "xsl-template-1", + "instance_type": []interface{}{ + map[string]interface{}{ + "cpu": 4, + "disk_size_gb": 100, + "memory_mb": 8192, + "cpus_sets": "0-3", + "cache_passthrough": true, + "gpu_config": map[string]interface{}{ + "num_gpus": 2, + "device_model": "Tesla T4", + "vendor": "NVIDIA", + "addresses": map[string]interface{}{ + "gpu-address-1": "10.0.0.1", + "gpu-address-2": "10.0.0.2", + }, + }, + "attached_disks": []interface{}{ + map[string]interface{}{ + "managed": true, + "size_in_gb": 500, + }, + }, + }, + }, + }, + expectedHash: 3451728783, + }, + { + name: "Test Case 2", + input: map[string]interface{}{}, + expectedHash: 2166136261, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + hash := resourceMachinePoolLibvirtHash(tc.input) + assert.Equal(t, tc.expectedHash, hash) + }) + } +} + +func TestResourceMachinePoolMaasHash(t *testing.T) { + testCases := []struct { + name string + input interface{} + expectedHash int + }{ + { + name: "Valid MachinePoolMaasHash", + input: map[string]interface{}{ + "instance_type": []interface{}{ + map[string]interface{}{ + "min_cpu": 2, + "min_memory_mb": 4096, + }, + }, + "azs": schema.NewSet(schema.HashString, []interface{}{"az1", "az2"}), + }, + expectedHash: 3363048657, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + hash := resourceMachinePoolMaasHash(tc.input) + assert.Equal(t, tc.expectedHash, hash) + }) + } +} + +func TestResourceMachinePoolVirtualHash(t *testing.T) { + testCases := []struct { + name string + input interface{} + expectedHash int + }{ + { + name: "Valid MachinePoolVirtualHash", + input: map[string]interface{}{ + "key1": "value1", + "key2": 123, + }, + expectedHash: 2166136261, + }, + { + name: "Test Case 2", + input: map[string]interface{}{ + "key3": "value3", + "key4": true, + }, + expectedHash: 2166136261, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + hash := resourceMachinePoolVirtualHash(tc.input) + assert.Equal(t, tc.expectedHash, hash) + }) + } +} + +func TestResourceMachinePoolOpenStackHash(t *testing.T) { + testCases := []struct { + name string + input interface{} + expectedHash int + }{ + { + name: "Valid MachinePoolOpenStackHash", + input: map[string]interface{}{ + "instance_type": "flavor1", + "subnet_id": "subnet123", + "update_strategy": "RollingUpdate", + "azs": schema.NewSet(schema.HashString, []interface{}{"az1", "az2"}), + }, + expectedHash: 3148662768, + }, + { + name: "Valid MachinePoolOpenStackHash 2", + input: map[string]interface{}{ + "instance_type": "flavor2", + "subnet_id": 
"subnet456", + "update_strategy": "Recreate", + "azs": schema.NewSet(schema.HashString, []interface{}{"az3"}), + }, + expectedHash: 4045757255, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + hash := resourceMachinePoolOpenStackHash(tc.input) + assert.Equal(t, tc.expectedHash, hash) + }) + } +} diff --git a/spectrocloud/cluster_common_taints.go b/spectrocloud/cluster_common_taints.go index 2901af21..da09ed9b 100644 --- a/spectrocloud/cluster_common_taints.go +++ b/spectrocloud/cluster_common_taints.go @@ -47,8 +47,8 @@ func flattenClusterTaints(items []*models.V1Taint) []interface{} { return result } -func SetAdditionalLabelsAndTaints(labels map[string]string, intaints []*models.V1Taint, oi map[string]interface{}) { - if labels == nil || len(labels) == 0 { +func FlattenAdditionalLabelsAndTaints(labels map[string]string, intaints []*models.V1Taint, oi map[string]interface{}) { + if len(labels) == 0 { oi["additional_labels"] = make(map[string]interface{}) } else { oi["additional_labels"] = labels diff --git a/spectrocloud/cluster_node_common.go b/spectrocloud/cluster_node_common.go new file mode 100644 index 00000000..e0ce2a3b --- /dev/null +++ b/spectrocloud/cluster_node_common.go @@ -0,0 +1,119 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spectrocloud/hapi/models" + "github.com/spectrocloud/palette-sdk-go/client" + "log" + "time" +) + +var NodeMaintenanceLifecycleStates = []string{ + "Completed", + "InProgress", + "Initiated", + "Failed", +} + +type GetMaintenanceStatus func(string, string, string, string) (*models.V1MachineMaintenanceStatus, error) + +type GetNodeStatusMap func(string, string, string) (map[string]models.V1CloudMachineStatus, error) + +func waitForNodeMaintenanceCompleted(c *client.V1Client, ctx context.Context, fn GetMaintenanceStatus, ClusterContext string, ConfigUID string, MachineName string, NodeId string) (error, bool) { + + stateConf := &retry.StateChangeConf{ + Delay: 30 * time.Second, + Pending: NodeMaintenanceLifecycleStates, + Target: []string{"Completed"}, + Refresh: resourceClusterNodeMaintenanceRefreshFunc(c, fn, ClusterContext, ConfigUID, MachineName, NodeId), + Timeout: 30 * time.Minute, + MinTimeout: 10 * time.Second, + } + + // Wait, catching any errors + _, err := stateConf.WaitForStateContext(ctx) + if err != nil { + return err, true + } + return nil, false +} + +func resourceClusterNodeMaintenanceRefreshFunc(c *client.V1Client, fn GetMaintenanceStatus, ClusterContext string, ConfigUID string, MachineName string, NodeId string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + nmStatus, err := c.GetNodeMaintenanceStatus(client.GetMaintenanceStatus(fn), ClusterContext, ConfigUID, MachineName, NodeId) + if err != nil { + return nil, "", err + } + + state := nmStatus.State + log.Printf("Node maintenance state (%s): %s", NodeId, state) + + return nmStatus, state, nil + } +} + +func resourceNodeAction(c *client.V1Client, ctx context.Context, newMachinePool interface{}, fn GetMaintenanceStatus, CloudType string, ClusterContext string, ConfigUID string, MachineName string) error { + newNodes := newMachinePool.(map[string]interface{})["node"] + if newNodes != nil { + for _, n := range newNodes.([]interface{}) { + node := n.(map[string]interface{}) + nodeMaintenanceStatus, err := c.GetNodeMaintenanceStatus(client.GetMaintenanceStatus(fn), ClusterContext, 
ConfigUID, MachineName, node["node_id"].(string))
+			if err != nil {
+				return err
+			}
+			if node["action"] != nodeMaintenanceStatus.Action {
+				nm := &models.V1MachineMaintenance{
+					Action: node["action"].(string),
+				}
+				err := c.ToggleMaintenanceOnNode(nm, CloudType, ClusterContext, ConfigUID, MachineName, node["node_id"].(string))
+				if err != nil {
+					return err
+				}
+				err, isError := waitForNodeMaintenanceCompleted(c, ctx, fn, ClusterContext, ConfigUID, MachineName, node["node_id"].(string))
+				if isError {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func flattenNodeMaintenanceStatus(c *client.V1Client, d *schema.ResourceData, fn GetNodeStatusMap, mPools []interface{}, cloudConfigId string, ClusterContext string) ([]interface{}, error) {
+	_, n := d.GetChange("machine_pool")
+	nsMap := make(map[string]interface{})
+	for _, mp := range n.(*schema.Set).List() {
+		machinePool := mp.(map[string]interface{})
+		nsMap[machinePool["name"].(string)] = machinePool
+	}
+
+	for i, mp := range mPools {
+		m := mp.(map[string]interface{})
+		// Pool not present in the planned machine_pool set (also the case in unit tests); return unchanged.
+		if _, ok := nsMap[m["name"].(string)]; !ok {
+			return mPools, nil
+		}
+
+		newNodeList := nsMap[m["name"].(string)].(map[string]interface{})["node"].([]interface{})
+		if len(newNodeList) > 0 {
+			var nodes []interface{}
+			nodesStatus, err := fn(cloudConfigId, m["name"].(string), ClusterContext)
+			if err != nil {
+				return nil, err
+			}
+			for key, value := range nodesStatus {
+				for _, newNode := range newNodeList {
+					if newNode.(map[string]interface{})["node_id"] == key {
+						nodes = append(nodes, c.GetNodeValue(key, value.MaintenanceStatus.Action))
+					}
+				}
+			}
+			if nodes != nil {
+				mPools[i].(map[string]interface{})["node"] = nodes
+			}
+		}
+	}
+	return mPools, nil
+}
diff --git a/spectrocloud/provider.go b/spectrocloud/provider.go
index 58231e6f..0c35ecc6 100644
--- a/spectrocloud/provider.go
+++ b/spectrocloud/provider.go
@@ -24,6 +24,7 @@ func New(_ string) func() *schema.Provider {
 			"username": {
 				Type:        schema.TypeString,
 				Optional:    true,
+				Deprecated:  "Deprecated since 0.15.0. Use `api_key` instead.",
 				Description: "The Spectro Cloud username. Can also be set with the `SPECTROCLOUD_USERNAME` environment variable.",
 				DefaultFunc: schema.EnvDefaultFunc("SPECTROCLOUD_USERNAME", nil),
 			},
@@ -31,6 +32,7 @@ func New(_ string) func() *schema.Provider {
 				Type:        schema.TypeString,
 				Optional:    true,
 				Sensitive:   true,
+				Deprecated:  "Deprecated since 0.15.0. Use `api_key` instead.",
 				Description: "The Spectro Cloud user password. 
Can also be set with the `SPECTROCLOUD_PASSWORD` environment variable.", DefaultFunc: schema.EnvDefaultFunc("SPECTROCLOUD_PASSWORD", nil), }, diff --git a/spectrocloud/resource_cluster_aks.go b/spectrocloud/resource_cluster_aks.go index a7af33e1..32ee7b3d 100644 --- a/spectrocloud/resource_cluster_aks.go +++ b/spectrocloud/resource_cluster_aks.go @@ -491,7 +491,7 @@ func flattenMachinePoolConfigsAks(machinePools []*models.V1AzureMachinePoolConfi for _, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) if *machinePool.IsControlPlane { continue diff --git a/spectrocloud/resource_cluster_aws.go b/spectrocloud/resource_cluster_aws.go index bfde3d5b..7fb3d967 100644 --- a/spectrocloud/resource_cluster_aws.go +++ b/spectrocloud/resource_cluster_aws.go @@ -2,17 +2,19 @@ package spectrocloud import ( "context" + "log" + "sort" + "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" "github.com/spectrocloud/terraform-provider-spectrocloud/types" - "log" - "strings" - "time" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" + "github.com/spectrocloud/palette-sdk-go/client" ) func resourceClusterAws() *schema.Resource { @@ -21,6 +23,7 @@ func resourceClusterAws() *schema.Resource { ReadContext: resourceClusterAwsRead, UpdateContext: resourceClusterAwsUpdate, DeleteContext: resourceClusterDelete, + Description: "Resource for managing AWS clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -35,6 +38,12 @@ func resourceClusterAws() *schema.Resource { Required: true, ForceNew: true, }, + "context": { + Type: schema.TypeString, + Optional: true, + Default: "project", + ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), + }, "tags": { Type: schema.TypeSet, Optional: true, @@ -42,74 +51,9 @@ func resourceClusterAws() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "A list of tags to be applied to the cluster. 
Tags must be in the form of `key:value`.",
 			},
-			"cluster_profile_id": {
-				Type:       schema.TypeString,
-				Optional:   true,
-				Deprecated: "Switch to cluster_profile",
-			},
-			"cluster_profile": {
-				Type:          schema.TypeList,
-				Optional:      true,
-				ConflictsWith: []string{"cluster_profile_id", "pack"},
-				Elem: &schema.Resource{
-					Schema: map[string]*schema.Schema{
-						"id": {
-							Type:     schema.TypeString,
-							Required: true,
-						},
-						"pack": {
-							Type:     schema.TypeList,
-							Optional: true,
-							Elem: &schema.Resource{
-								Schema: map[string]*schema.Schema{
-									"type": {
-										Type:     schema.TypeString,
-										Optional: true,
-										Default:  "spectro",
-									},
-									"name": {
-										Type:     schema.TypeString,
-										Required: true,
-									},
-									"registry_uid": {
-										Type:     schema.TypeString,
-										Optional: true,
-									},
-									"tag": {
-										Type:     schema.TypeString,
-										Optional: true,
-									},
-									"values": {
-										Type:     schema.TypeString,
-										Required: true,
-									},
-									"manifest": {
-										Type:     schema.TypeList,
-										Optional: true,
-										Elem: &schema.Resource{
-											Schema: map[string]*schema.Schema{
-												"name": {
-													Type:     schema.TypeString,
-													Required: true,
-												},
-												"content": {
-													Type:     schema.TypeString,
-													Required: true,
-													DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
-														// UI strips the trailing newline on save
-														return strings.TrimSpace(old) == strings.TrimSpace(new)
-													},
-												},
-											},
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
+			"cluster_profile": schemas.ClusterProfileSchema(),
 			"apply_setting": {
 				Type:     schema.TypeString,
 				Optional: true,
@@ -120,26 +64,33 @@ func resourceClusterAws() *schema.Resource {
 				ForceNew: true,
 			},
 			"cloud_config_id": {
-				Type:     schema.TypeString,
-				Computed: true,
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "ID of the cloud config used for the cluster. This cloud config must be of type `aws`.",
+				Deprecated:  "This field is deprecated and will be removed in the future. Use `cloud_config` instead.",
 			},
 			"os_patch_on_boot": {
-				Type:     schema.TypeBool,
-				Optional: true,
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Default:     false,
+				Description: "Whether to apply OS patch on boot. Default is `false`.",
 			},
 			"os_patch_schedule": {
 				Type:             schema.TypeString,
 				Optional:         true,
 				ValidateDiagFunc: validateOsPatchSchedule,
+				Description:      "The cron schedule for OS patching. This must be in the form of cron syntax. Ex: `0 0 * * *`.",
 			},
 			"os_patch_after": {
 				Type:             schema.TypeString,
 				Optional:         true,
 				ValidateDiagFunc: validateOsPatchOnDemandAfter,
+				Description:      "Date and time after which to patch the cluster. Must be in RFC3339 format: `2006-01-02T15:04:05Z07:00`.",
 			},
 			"kubeconfig": {
-				Type:     schema.TypeString,
-				Computed: true,
+				Type:        schema.TypeString,
+				Computed:    true,
+				Description: "Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.",
 			},
 			"cloud_config": {
 				Type:     schema.TypeList,
@@ -166,30 +117,6 @@ func resourceClusterAws() *schema.Resource {
 					},
 				},
 			},
-			"pack": {
-				Type:     schema.TypeList,
-				Optional: true,
-				Elem: &schema.Resource{
-					Schema: map[string]*schema.Schema{
-						"name": {
-							Type:     schema.TypeString,
-							Required: true,
-						},
-						"registry_uid": {
-							Type:     schema.TypeString,
-							Optional: true,
-						},
-						"tag": {
-							Type:     schema.TypeString,
-							Required: true,
-						},
-						"values": {
-							Type:     schema.TypeString,
-							Required: true,
-						},
-					},
-				},
-			},
 			"machine_pool": {
 				Type:     schema.TypeSet,
 				Required: true,
@@ -203,48 +130,49 @@ func resourceClusterAws() *schema.Resource {
 								Type: schema.TypeString,
 							},
 						},
-						"taints": {
-							Type:     schema.TypeList,
-							Optional: true,
-							Elem: &schema.Resource{
-								Schema: map[string]*schema.Schema{
-									"key": {
-										Type:     schema.TypeString,
-										Required: true,
-									},
-									"value": {
-										Type:     schema.TypeString,
-										Required: true,
-									},
-									"effect": {
-										Type:     schema.TypeString,
-										Required: true,
-									},
-								},
-							},
-						},
+						"taints": schemas.ClusterTaintsSchema(),
 						"control_plane": {
-							Type:     schema.TypeBool,
-							Optional: true,
-							Default:  false,
+							Type:        schema.TypeBool,
+							Optional:    true,
+							Default:     false,
+							Description: "Whether this machine pool is a control plane. Defaults to `false`.",
 						},
 						"control_plane_as_worker": {
 							Type:     schema.TypeBool,
 							Optional: true,
 							Default:  false,
+							//ForceNew: true,
+							Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.",
 						},
 						"name": {
 							Type:     schema.TypeString,
 							Required: true,
 						},
 						"count": {
-							Type:     schema.TypeInt,
-							Required: true,
+							Type:        schema.TypeInt,
+							Required:    true,
+							Description: "Number of nodes in the machine pool.",
 						},
 						"instance_type": {
 							Type:     schema.TypeString,
 							Required: true,
 						},
+						"min": {
+							Type:        schema.TypeInt,
+							Optional:    true,
+							Description: "Minimum number of nodes in the machine pool. This is used for autoscaling the machine pool.",
+						},
+						"max": {
+							Type:        schema.TypeInt,
+							Optional:    true,
+							Description: "Maximum number of nodes in the machine pool. This is used for autoscaling the machine pool.",
+						},
+						"node_repave_interval": {
+							Type:        schema.TypeInt,
+							Optional:    true,
+							Default:     0,
+							Description: "Minimum number of seconds a node should be Ready before the next node is selected for repave. Default value is `0`. Applicable only to worker pools.",
+						},
 						"capacity_type": {
 							Type:    schema.TypeString,
 							Default: "on-demand",
@@ -257,9 +185,11 @@ func resourceClusterAws() *schema.Resource {
 							Optional: true,
 						},
 						"update_strategy": {
-							Type:     schema.TypeString,
-							Optional: true,
-							Default:  "RollingUpdateScaleOut",
+							Type:         schema.TypeString,
+							Optional:     true,
+							Default:      "RollingUpdateScaleOut",
+							Description:  "Update strategy for the machine pool. 
Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", + ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), }, "disk_size_gb": { Type: schema.TypeInt, @@ -285,140 +215,29 @@ func resourceClusterAws() *schema.Resource { Required: true, }, }, - }, - }, - }, - "backup_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "prefix": { - Type: schema.TypeString, - Required: true, - }, - "backup_location_id": { - Type: schema.TypeString, - Required: true, - }, - "schedule": { - Type: schema.TypeString, - Required: true, - }, - "expiry_in_hour": { - Type: schema.TypeInt, - Required: true, - }, - "include_disks": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "include_cluster_resources": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "namespaces": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "scan_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "configuration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "penetration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "conformance_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "cluster_rbac_binding": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - "role": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "subjects": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "namespaces": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "resource_allocation": { - Type: schema.TypeMap, - Required: true, + "additional_security_groups": { + Type: schema.TypeSet, + Set: schema.HashString, Elem: &schema.Schema{ Type: schema.TypeString, }, + Optional: true, + Description: "Additional security groups to attach to the instance.", }, }, }, }, - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchemaComputed(), + "backup_policy": schemas.BackupPolicySchema(), + "scan_policy": schemas.ScanPolicySchema(), + "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), + "namespaces": schemas.ClusterNamespacesSchema(), + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchemaComputed(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If `true`, the cluster will be created asynchronously. 
Default value is `false`.", }, }, } @@ -430,14 +249,18 @@ func resourceClusterAwsCreate(ctx context.Context, d *schema.ResourceData, m int // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster := toAwsCluster(c, d) + cluster, err := toAwsCluster(c, d) + if err != nil { + return diag.FromErr(err) + } - uid, err := c.CreateClusterAws(cluster) + ClusterContext := d.Get("context").(string) + uid, err := c.CreateClusterAws(cluster, ClusterContext) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) if isError { return diagnostics } @@ -452,10 +275,8 @@ func resourceClusterAwsRead(_ context.Context, d *schema.ResourceData, m interfa c := m.(*client.V1Client) var diags diag.Diagnostics - // - uid := d.Id() - // - cluster, err := c.GetCluster(uid) + + cluster, err := resourceClusterRead(d, c, diags) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -473,10 +294,11 @@ func resourceClusterAwsRead(_ context.Context, d *schema.ResourceData, m interfa } func flattenCloudConfigAws(configUID string, d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + ClusterContext := d.Get("context").(string) if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigAws(configUID); err != nil { + if config, err := c.GetCloudConfigAws(configUID, ClusterContext); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsAws(config.Spec.MachinePoolConfig) @@ -499,14 +321,16 @@ func flattenMachinePoolConfigsAws(machinePools []*models.V1AwsMachinePoolConfig) for i, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenControlPlaneAndRepaveInterval(machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) - oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name oi["count"] = int(machinePool.Size) flattenUpdateStrategy(machinePool.UpdateStrategy, oi) + oi["min"] = int(machinePool.MinSize) + oi["max"] = int(machinePool.MaxSize) oi["instance_type"] = machinePool.InstanceType if machinePool.CapacityType != nil { oi["capacity_type"] = machinePool.CapacityType @@ -520,9 +344,36 @@ func flattenMachinePoolConfigsAws(machinePools []*models.V1AwsMachinePoolConfig) } else { oi["azs"] = machinePool.Azs } + + if machinePool.AdditionalSecurityGroups != nil && len(machinePool.AdditionalSecurityGroups) > 0 { + additionalSecuritygroup := make([]string, 0) + for _, sg := range machinePool.AdditionalSecurityGroups { + additionalSecuritygroup = append(additionalSecuritygroup, sg.ID) + } + oi["additional_security_groups"] = additionalSecuritygroup + } + ois[i] = oi } + sort.SliceStable(ois, func(i, j int) bool { + var controlPlaneI, controlPlaneJ bool + if ois[i].(map[string]interface{})["control_plane"] != nil { + controlPlaneI = ois[i].(map[string]interface{})["control_plane"].(bool) + } + if ois[j].(map[string]interface{})["control_plane"] != nil { + controlPlaneJ = ois[j].(map[string]interface{})["control_plane"].(bool) + } + + // If both are control planes or both are not, sort by name + if controlPlaneI == controlPlaneJ { + return 
ois[i].(map[string]interface{})["name"].(string) < ois[j].(map[string]interface{})["name"].(string) + } + + // Otherwise, control planes come first + return controlPlaneI && !controlPlaneJ + }) + return ois } @@ -533,7 +384,7 @@ func resourceClusterAwsUpdate(ctx context.Context, d *schema.ResourceData, m int var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - + ClusterContext := d.Get("context").(string) if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") if oraw == nil { @@ -554,27 +405,34 @@ func resourceClusterAwsUpdate(ctx context.Context, d *schema.ResourceData, m int for _, mp := range ns.List() { machinePoolResource := mp.(map[string]interface{}) - name := machinePoolResource["name"].(string) - if name != "" { - hash := resourceMachinePoolAwsHash(machinePoolResource) - vpcId := d.Get("cloud_config").([]interface{})[0].(map[string]interface{})["vpc_id"] - machinePool := toMachinePoolAws(machinePoolResource, vpcId.(string)) - - var err error - if oldMachinePool, ok := osMap[name]; !ok { - log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolAws(cloudConfigId, machinePool) - } else if hash != resourceMachinePoolAwsHash(oldMachinePool) { - log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolAws(cloudConfigId, machinePool) - } - - if err != nil { - return diag.FromErr(err) + // since known issue in TF SDK: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 + if machinePoolResource["name"].(string) != "" { + name := machinePoolResource["name"].(string) + if name != "" { + hash := resourceMachinePoolAwsHash(machinePoolResource) + vpcId := d.Get("cloud_config").([]interface{})[0].(map[string]interface{})["vpc_id"] + + var err error + machinePool, err := toMachinePoolAws(machinePoolResource, vpcId.(string)) + if err != nil { + return diag.FromErr(err) + } + + if oldMachinePool, ok := osMap[name]; !ok { + log.Printf("Create machine pool %s", name) + err = c.CreateMachinePoolAws(cloudConfigId, machinePool, ClusterContext) + } else if hash != resourceMachinePoolAwsHash(oldMachinePool) { + log.Printf("Change in machine pool %s", name) + err = c.UpdateMachinePoolAws(cloudConfigId, machinePool, ClusterContext) + } + + if err != nil { + return diag.FromErr(err) + } + + // Processed (if exists) + delete(osMap, name) } - - // Processed (if exists) - delete(osMap, name) } } @@ -583,15 +441,11 @@ func resourceClusterAwsUpdate(ctx context.Context, d *schema.ResourceData, m int machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolAws(cloudConfigId, name); err != nil { + if err := c.DeleteMachinePoolAws(cloudConfigId, name, ClusterContext); err != nil { return diag.FromErr(err) } } } - //TODO(saamalik) update for cluster as well - //if err := waitForClusterU(ctx, c, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - // return diag.FromErr(err) - //} diagnostics, done := updateCommonFields(d, c) if done { @@ -603,10 +457,14 @@ func resourceClusterAwsUpdate(ctx context.Context, d *schema.ResourceData, m int return diags } -func toAwsCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroAwsClusterEntity { +func toAwsCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroAwsClusterEntity, error) { // gnarly, I know! 
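// The comparator above puts control-plane pools first and orders pools of the
// same kind by name. A self-contained demonstration of the ordering rule:
//
// package main
// import ("fmt"; "sort")

func main() {
	pools := []map[string]interface{}{
		{"name": "worker-b", "control_plane": false},
		{"name": "cp-1", "control_plane": true},
		{"name": "worker-a", "control_plane": false},
	}
	sort.SliceStable(pools, func(i, j int) bool {
		cpI := pools[i]["control_plane"].(bool)
		cpJ := pools[j]["control_plane"].(bool)
		if cpI == cpJ {
			return pools[i]["name"].(string) < pools[j]["name"].(string)
		}
		return cpI && !cpJ
	})
	fmt.Println(pools) // cp-1, worker-a, worker-b
}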
=/ cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) + profiles, err := toProfiles(c, d) + if err != nil { + return nil, err + } cluster := &models.V1SpectroAwsClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -615,7 +473,7 @@ func toAwsCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroA }, Spec: &models.V1SpectroAwsClusterEntitySpec{ CloudAccountUID: types.Ptr(d.Get("cloud_account_id").(string)), - Profiles: toProfiles(c, d), + Profiles: profiles, Policies: toPolicies(d), CloudConfig: &models.V1AwsClusterConfig{ SSHKeyName: cloudConfig["ssh_key_name"].(string), @@ -625,20 +483,35 @@ func toAwsCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroA }, } - //for _, machinePool := range d.Get("machine_pool").([]interface{}) { machinePoolConfigs := make([]*models.V1AwsMachinePoolConfigEntity, 0) for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { - mp := toMachinePoolAws(machinePool, cluster.Spec.CloudConfig.VpcID) + mp, err := toMachinePoolAws(machinePool, cluster.Spec.CloudConfig.VpcID) + if err != nil { + return nil, err + } machinePoolConfigs = append(machinePoolConfigs, mp) } + sort.SliceStable(machinePoolConfigs, func(i, j int) bool { + controlPlaneI := machinePoolConfigs[i].PoolConfig.IsControlPlane + controlPlaneJ := machinePoolConfigs[j].PoolConfig.IsControlPlane + + // If both are control planes or both are not, sort by name + if controlPlaneI == controlPlaneJ { + return *machinePoolConfigs[i].PoolConfig.Name < *machinePoolConfigs[j].PoolConfig.Name + } + + // Otherwise, control planes come first + return controlPlaneI && !controlPlaneJ + }) + cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster + return cluster, nil } -func toMachinePoolAws(machinePool interface{}, vpcId string) *models.V1AwsMachinePoolConfigEntity { +func toMachinePoolAws(machinePool interface{}, vpcId string) (*models.V1AwsMachinePoolConfigEntity, error) { m := machinePool.(map[string]interface{}) labels := make([]string, 0) @@ -668,6 +541,17 @@ func toMachinePoolAws(machinePool interface{}, vpcId string) *models.V1AwsMachin azs = append(azs, az.(string)) } } + min := int32(m["count"].(int)) + max := int32(m["count"].(int)) + + if m["min"] != nil { + min = int32(m["min"].(int)) + } + + if m["max"] != nil { + max = int32(m["max"].(int)) + } + mp := &models.V1AwsMachinePoolConfigEntity{ CloudConfig: &models.V1AwsMachinePoolCloudConfigEntity{ Azs: azs, @@ -686,10 +570,26 @@ func toMachinePoolAws(machinePool interface{}, vpcId string) *models.V1AwsMachin UpdateStrategy: &models.V1UpdateStrategy{ Type: getUpdateStrategy(m), }, + MinSize: min, + MaxSize: max, UseControlPlaneAsWorker: controlPlaneAsWorker, }, } + if !controlPlane { + nodeRepaveInterval := 0 + if m["node_repave_interval"] != nil { + nodeRepaveInterval = m["node_repave_interval"].(int) + } + mp.PoolConfig.NodeRepaveInterval = int32(nodeRepaveInterval) + } else { + err := ValidationNodeRepaveIntervalForControlPlane(m["node_repave_interval"].(int)) + if err != nil { + return mp, err + } + + } + if capacityType == "spot" { maxPrice := "0.0" // default value if m["max_price"] != nil && len(m["max_price"].(string)) > 0 { @@ -700,5 +600,10 @@ func toMachinePoolAws(machinePool interface{}, vpcId string) *models.V1AwsMachin MaxPrice: maxPrice, } } - return mp + + if m["additional_security_groups"] != nil { + mp.CloudConfig.AdditionalSecurityGroups = setAdditionalSecurityGroups(m) + } + 
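// ValidationNodeRepaveIntervalForControlPlane is defined in the new
// cluster_node_common.go. Given how it is used above, it presumably rejects
// any non-zero repave interval on a control-plane pool (sketch; the exact
// error message is assumed):

func ValidationNodeRepaveIntervalForControlPlane(nodeRepaveInterval int) error {
	if nodeRepaveInterval != 0 {
		return fmt.Errorf("`node_repave_interval` is not applicable for control-plane machine pools, got %d", nodeRepaveInterval)
	}
	return nil
}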
+ return mp, nil } diff --git a/spectrocloud/resource_cluster_aws_expand_test.go b/spectrocloud/resource_cluster_aws_expand_test.go new file mode 100644 index 00000000..3352a7f1 --- /dev/null +++ b/spectrocloud/resource_cluster_aws_expand_test.go @@ -0,0 +1,79 @@ +package spectrocloud + +import ( + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spectrocloud/hapi/models" + + "github.com/spectrocloud/terraform-provider-spectrocloud/types" +) + +func TestToMachinePoolAws(t *testing.T) { + testCases := []struct { + name string + input map[string]interface{} + vpcId string + expected *models.V1AwsMachinePoolConfigEntity + }{ + { + name: "Test 1: Basic test case", + input: map[string]interface{}{ + "control_plane": false, + "control_plane_as_worker": false, + "name": "testPool", + "count": 3, + "instance_type": "t2.micro", + "min": 1, + "max": 5, + "capacity_type": "on-demand", + "update_strategy": "RollingUpdateScaleOut", + "disk_size_gb": 65, + "azs": schema.NewSet(schema.HashString, []interface{}{"us-west-1a", "us-west-1b"}), + "additional_security_groups": schema.NewSet(schema.HashString, []interface{}{"sg-12345", "sg-67890"}), + }, + vpcId: "vpc-12345", + expected: &models.V1AwsMachinePoolConfigEntity{ + CloudConfig: &models.V1AwsMachinePoolCloudConfigEntity{ + Azs: []string{"us-west-1a", "us-west-1b"}, + InstanceType: types.Ptr("t2.micro"), + CapacityType: types.Ptr("on-demand"), + RootDeviceSize: int64(65), + Subnets: []*models.V1AwsSubnetEntity{}, // assuming no az_subnets provided + AdditionalSecurityGroups: []*models.V1AwsResourceReference{ + { + ID: "sg-12345", + }, + { + ID: "sg-67890", + }, + }, + }, + PoolConfig: &models.V1MachinePoolConfigEntity{ + AdditionalLabels: map[string]string{}, + IsControlPlane: false, + Labels: []string{}, + Name: types.Ptr("testPool"), + Size: types.Ptr(int32(3)), + UpdateStrategy: &models.V1UpdateStrategy{ + Type: "RollingUpdateScaleOut", + }, + MinSize: int32(1), + MaxSize: int32(5), + UseControlPlaneAsWorker: false, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, _ := toMachinePoolAws(tc.input, tc.vpcId) + if !reflect.DeepEqual(result, tc.expected) { + t.Errorf("Unexpected result (-want +got):\n%s", cmp.Diff(tc.expected, result)) + } + }) + } +} diff --git a/spectrocloud/resource_cluster_azure.go b/spectrocloud/resource_cluster_azure.go index f5d4ff00..8214e430 100644 --- a/spectrocloud/resource_cluster_azure.go +++ b/spectrocloud/resource_cluster_azure.go @@ -2,16 +2,19 @@ package spectrocloud import ( "context" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" - "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "fmt" "log" - "strings" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" + "github.com/spectrocloud/palette-sdk-go/client" ) func resourceClusterAzure() *schema.Resource { @@ -20,6 +23,7 @@ func resourceClusterAzure() *schema.Resource { ReadContext: resourceClusterAzureRead, UpdateContext: resourceClusterAzureUpdate, DeleteContext: resourceClusterDelete, + 
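// setAdditionalSecurityGroups, called at the end of toMachinePoolAws above, is
// not shown in this hunk. A minimal sketch consistent with the expand test's
// expected []*models.V1AwsResourceReference output (body assumed):

func setAdditionalSecurityGroups(m map[string]interface{}) []*models.V1AwsResourceReference {
	refs := make([]*models.V1AwsResourceReference, 0)
	if sgs, ok := m["additional_security_groups"]; ok && sgs != nil {
		for _, sg := range sgs.(*schema.Set).List() {
			refs = append(refs, &models.V1AwsResourceReference{ID: sg.(string)})
		}
	}
	return refs
}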
Description: "Resource for managing Azure clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -29,9 +33,16 @@ func resourceClusterAzure() *schema.Resource { Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the cluster. This name will be used to create the cluster in Azure.", + }, + "context": { + Type: schema.TypeString, + Optional: true, + Default: "project", + ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), }, "tags": { Type: schema.TypeSet, @@ -40,100 +51,48 @@ func resourceClusterAzure() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", }, - "cluster_profile": { - Type: schema.TypeList, - Optional: true, - ConflictsWith: []string{"pack"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - //ForceNew: true, - }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - Default: "spectro", - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - }, - "values": { - Type: schema.TypeString, - Required: true, - }, - "manifest": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "content": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // UI strips the trailing newline on save - return strings.TrimSpace(old) == strings.TrimSpace(new) - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, + "cluster_profile": schemas.ClusterProfileSchema(), "apply_setting": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + Description: "Apply setting for the cluster. This can be set to `on_create` or `on_update`.", }, "cloud_account_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID of the cloud account to be used for the cluster. This cloud account must be of type `azure`.", }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", + Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to apply OS patch on boot. Default is `false`.", }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, + Description: "Cron schedule for OS patching. 
This must be in the form of `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, + Description: "Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.", }, "cloud_config": { Type: schema.TypeList, @@ -143,44 +102,24 @@ func resourceClusterAzure() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "subscription_id": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Azure subscription ID. This can be found in the Azure portal under `Subscriptions`.", }, "resource_group": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Azure resource group. This can be found in the Azure portal under `Resource groups`.", }, "region": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Azure region. This can be found in the Azure portal under `Resource groups`.", }, "ssh_key": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "tag": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "SSH key to be used for the cluster nodes.", }, }, }, @@ -198,56 +137,49 @@ func resourceClusterAzure() *schema.Resource { Type: schema.TypeString, }, }, - "taints": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - "effect": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, + "taints": schemas.ClusterTaintsSchema(), "control_plane": { Type: schema.TypeBool, Optional: true, Default: false, //ForceNew: true, + Description: "Whether this machine pool is a control plane. Defaults to `false`.", }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, - //ForceNew: true, + Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "name": { Type: schema.TypeString, Required: true, //ForceNew: true, + Description: "Name of the machine pool. This must be unique within the cluster.", }, "count": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "Number of nodes in the machine pool.", + }, + "node_repave_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Minimum number of seconds node should be Ready, before the next node is selected for repave. 
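// validateOsPatchSchedule is referenced here but defined elsewhere in the
// provider. One plausible shape, validating the documented five-field cron
// form `0 0 * * *` via github.com/robfig/cron/v3 (implementation assumed):

func validateOsPatchSchedule(i interface{}, _ cty.Path) diag.Diagnostics {
	if _, err := cron.ParseStandard(i.(string)); err != nil {
		return diag.FromErr(fmt.Errorf("os_patch_schedule is invalid: %v", err))
	}
	return nil
}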
Default value is `0`, Applicable only for worker pools.", }, "instance_type": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Azure instance type from the Azure portal.", }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", + Description: "Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", + ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), }, "disk": { Type: schema.TypeList, @@ -267,15 +199,18 @@ func resourceClusterAzure() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "size_gb": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "Size of the disk in GB.", }, "type": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "Type of the disk. Valid values are `Standard_LRS`, `StandardSSD_LRS`, `Premium_LRS`.", }, }, }, + Description: "Disk configuration for the machine pool.", }, "azs": { Type: schema.TypeSet, @@ -284,10 +219,13 @@ func resourceClusterAzure() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "Availability zones for the machine pool.", }, "is_system_node_pool": { - Type: schema.TypeBool, - Required: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether this machine pool is a system node pool. Default value is `false'.", }, "os_type": { Type: schema.TypeString, @@ -295,152 +233,25 @@ func resourceClusterAzure() *schema.Resource { DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { return false }, + Default: "Linux", + ValidateFunc: validation.StringInSlice([]string{"Linux", "Windows"}, false), + Description: "Operating system type for the machine pool. Valid values are `Linux` and `Windows`. 
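// Likewise, validateOsPatchOnDemandAfter presumably just enforces the RFC3339
// format called out in the os_patch_after description (sketch, not the actual
// implementation):

func validateOsPatchOnDemandAfter(i interface{}, _ cty.Path) diag.Diagnostics {
	if _, err := time.Parse(time.RFC3339, i.(string)); err != nil {
		return diag.FromErr(fmt.Errorf("os_patch_after must be an RFC3339 timestamp, e.g. 2006-01-02T15:04:05Z07:00: %v", err))
	}
	return nil
}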
Defaults to `Linux`.", }, }, }, }, - "backup_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "prefix": { - Type: schema.TypeString, - Required: true, - }, - "backup_location_id": { - Type: schema.TypeString, - Required: true, - }, - "schedule": { - Type: schema.TypeString, - Required: true, - }, - "expiry_in_hour": { - Type: schema.TypeInt, - Required: true, - }, - "include_disks": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "include_cluster_resources": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "namespaces": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "scan_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "configuration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "penetration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "conformance_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "cluster_rbac_binding": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - "role": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "subjects": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "namespaces": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "resource_allocation": { - Type: schema.TypeMap, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchemaComputed(), + "backup_policy": schemas.BackupPolicySchema(), + "scan_policy": schemas.ScanPolicySchema(), + "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), + "namespaces": schemas.ClusterNamespacesSchema(), + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchemaComputed(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If `true`, the cluster will be created asynchronously. 
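// The odd-size rule that validateMasterPoolCount (defined later in this file)
// enforces at create and update time can be pinned down with a table-driven
// test; a sketch:

func TestValidateMasterPoolCount(t *testing.T) {
	cases := []struct {
		size    int32
		wantErr bool
	}{
		{1, false},
		{3, false},
		{2, true},
		{4, true},
	}
	for _, tc := range cases {
		size, name := tc.size, "cp-pool"
		pools := []*models.V1AzureMachinePoolConfigEntity{{
			PoolConfig: &models.V1MachinePoolConfigEntity{
				IsControlPlane: true,
				Name:           &name,
				Size:           &size,
			},
		}}
		if gotErr := validateMasterPoolCount(pools) != nil; gotErr != tc.wantErr {
			t.Errorf("size %d: got error=%v, want error=%v", tc.size, gotErr, tc.wantErr)
		}
	}
}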
Default value is `false`.", }, - //"cloud_config": { - // Type: schema.TypeString, - // Required: true, - // //DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // // return false - // //}, - // //StateFunc: func(val interface{}) string { - // // return strings.ToLower(val.(string)) - // //}, - //}, }, } } @@ -451,14 +262,22 @@ func resourceClusterAzureCreate(ctx context.Context, d *schema.ResourceData, m i // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster := toAzureCluster(c, d) + cluster, err := toAzureCluster(c, d) + if err != nil { + return diag.FromErr(err) + } + diags = validateMasterPoolCount(cluster.Spec.Machinepoolconfig) + if diags != nil { + return diags + } - uid, err := c.CreateClusterAzure(cluster) + ClusterContext := d.Get("context").(string) + uid, err := c.CreateClusterAzure(cluster, ClusterContext) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) if isError { return diagnostics } @@ -474,9 +293,7 @@ func resourceClusterAzureRead(_ context.Context, d *schema.ResourceData, m inter var diags diag.Diagnostics - uid := d.Id() - - cluster, err := c.GetCluster(uid) + cluster, err := resourceClusterRead(d, c, diags) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -494,10 +311,11 @@ func resourceClusterAzureRead(_ context.Context, d *schema.ResourceData, m inter } func flattenCloudConfigAzure(configUID string, d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + ClusterContext := d.Get("context").(string) if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigAzure(configUID); err != nil { + if config, err := c.GetCloudConfigAzure(configUID, ClusterContext); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsAzure(config.Spec.MachinePoolConfig) @@ -520,9 +338,9 @@ func flattenMachinePoolConfigsAzure(machinePools []*models.V1AzureMachinePoolCon for i, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenControlPlaneAndRepaveInterval(machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) - oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name oi["count"] = machinePool.Size @@ -532,7 +350,7 @@ func flattenMachinePoolConfigsAzure(machinePools []*models.V1AzureMachinePoolCon oi["is_system_node_pool"] = machinePool.IsSystemNodePool oi["azs"] = machinePool.Azs - + oi["os_type"] = machinePool.OsType if machinePool.OsDisk != nil { d := make(map[string]interface{}) d["size_gb"] = machinePool.OsDisk.DiskSizeGB @@ -554,8 +372,16 @@ func resourceClusterAzureUpdate(ctx context.Context, d *schema.ResourceData, m i var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - + ClusterContext := d.Get("context").(string) if d.HasChange("machine_pool") { + cluster, err := toAzureCluster(c, d) + if err != nil { + return diag.FromErr(err) + } + diags = validateMasterPoolCount(cluster.Spec.Machinepoolconfig) + if diags != nil { + return diags + } oraw, nraw := d.GetChange("machine_pool") if oraw == nil { oraw = new(schema.Set) @@ 
-575,26 +401,31 @@ func resourceClusterAzureUpdate(ctx context.Context, d *schema.ResourceData, m i for _, mp := range ns.List() { machinePoolResource := mp.(map[string]interface{}) - name := machinePoolResource["name"].(string) - hash := resourceMachinePoolAzureHash(machinePoolResource) - - machinePool := toMachinePoolAzure(machinePoolResource) - - var err error - if oldMachinePool, ok := osMap[name]; !ok { - log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolAzure(cloudConfigId, machinePool) - } else if hash != resourceMachinePoolAzureHash(oldMachinePool) { - log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolAzure(cloudConfigId, machinePool) - } - - if err != nil { - return diag.FromErr(err) + // since known issue in TF SDK: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 + if machinePoolResource["name"].(string) != "" { + name := machinePoolResource["name"].(string) + hash := resourceMachinePoolAzureHash(machinePoolResource) + var err error + machinePool, err := toMachinePoolAzure(machinePoolResource) + if err != nil { + diag.FromErr(err) + } + + if oldMachinePool, ok := osMap[name]; !ok { + log.Printf("Create machine pool %s", name) + err = c.CreateMachinePoolAzure(cloudConfigId, ClusterContext, machinePool) + } else if hash != resourceMachinePoolAzureHash(oldMachinePool) { + log.Printf("Change in machine pool %s", name) + err = c.UpdateMachinePoolAzure(cloudConfigId, ClusterContext, machinePool) + } + + if err != nil { + return diag.FromErr(err) + } + + // Processed (if exists) + delete(osMap, name) } - - // Processed (if exists) - delete(osMap, name) } // Deleted old machine pools @@ -602,15 +433,11 @@ func resourceClusterAzureUpdate(ctx context.Context, d *schema.ResourceData, m i machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolAzure(cloudConfigId, name); err != nil { + if err := c.DeleteMachinePoolAzure(cloudConfigId, name, ClusterContext); err != nil { return diag.FromErr(err) } } } - //TODO(saamalik) update for cluster as well - //if err := waitForClusterU(ctx, c, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - // return diag.FromErr(err) - //} diagnostics, done := updateCommonFields(d, c) if done { @@ -622,11 +449,15 @@ func resourceClusterAzureUpdate(ctx context.Context, d *schema.ResourceData, m i return diags } -func toAzureCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroAzureClusterEntity { +func toAzureCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroAzureClusterEntity, error) { // gnarly, I know! 
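// Note: in the machine-pool loop above, the nil check after toMachinePoolAzure
// builds diagnostics but drops them; for a bad pool to abort the apply, the
// error has to be returned:
//
//	machinePool, err := toMachinePoolAzure(machinePoolResource)
//	if err != nil {
//		return diag.FromErr(err)
//	}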
=/ cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) //clientSecret := strfmt.Password(d.Get("azure_client_secret").(string)) + profiles, err := toProfiles(c, d) + if err != nil { + return nil, err + } cluster := &models.V1SpectroAzureClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -635,7 +466,7 @@ func toAzureCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Spectr }, Spec: &models.V1SpectroAzureClusterEntitySpec{ CloudAccountUID: types.Ptr(d.Get("cloud_account_id").(string)), - Profiles: toProfiles(c, d), + Profiles: profiles, Policies: toPolicies(d), CloudConfig: &models.V1AzureClusterConfig{ Location: types.Ptr(cloudConfig["region"].(string)), @@ -649,17 +480,20 @@ func toAzureCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Spectr //for _, machinePool := range d.Get("machine_pool").([]interface{}) { machinePoolConfigs := make([]*models.V1AzureMachinePoolConfigEntity, 0) for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { - mp := toMachinePoolAzure(machinePool) + mp, err := toMachinePoolAzure(machinePool) + if err != nil { + return nil, err + } machinePoolConfigs = append(machinePoolConfigs, mp) } cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster + return cluster, nil } -func toMachinePoolAzure(machinePool interface{}) *models.V1AzureMachinePoolConfigEntity { +func toMachinePoolAzure(machinePool interface{}) (*models.V1AzureMachinePoolConfigEntity, error) { m := machinePool.(map[string]interface{}) labels := make([]string, 0) @@ -720,5 +554,30 @@ func toMachinePoolAzure(machinePool interface{}) *models.V1AzureMachinePoolConfi UseControlPlaneAsWorker: controlPlaneAsWorker, }, } - return mp + + if !controlPlane { + nodeRepaveInterval := 0 + if m["node_repave_interval"] != nil { + nodeRepaveInterval = m["node_repave_interval"].(int) + } + mp.PoolConfig.NodeRepaveInterval = int32(nodeRepaveInterval) + } else { + err := ValidationNodeRepaveIntervalForControlPlane(m["node_repave_interval"].(int)) + if err != nil { + return mp, err + } + } + + return mp, nil +} + +func validateMasterPoolCount(machinePool []*models.V1AzureMachinePoolConfigEntity) diag.Diagnostics { + for _, machineConfig := range machinePool { + if machineConfig.PoolConfig.IsControlPlane { + if *machineConfig.PoolConfig.Size%2 == 0 { + return diag.FromErr(fmt.Errorf("The master node pool size should be in an odd number. 
But it set to an even number '%d' in node name '%s' ", *machineConfig.PoolConfig.Size, *machineConfig.PoolConfig.Name)) + } + } + } + return nil } diff --git a/spectrocloud/resource_cluster_edge_vsphere.go b/spectrocloud/resource_cluster_edge_vsphere.go index 86950672..d6f864ee 100644 --- a/spectrocloud/resource_cluster_edge_vsphere.go +++ b/spectrocloud/resource_cluster_edge_vsphere.go @@ -2,16 +2,18 @@ package spectrocloud import ( "context" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" - "github.com/spectrocloud/terraform-provider-spectrocloud/types" "log" - "strings" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" + "github.com/spectrocloud/palette-sdk-go/client" ) func resourceClusterEdgeVsphere() *schema.Resource { @@ -33,6 +35,12 @@ func resourceClusterEdgeVsphere() *schema.Resource { Required: true, ForceNew: true, }, + "context": { + Type: schema.TypeString, + Optional: true, + Default: "project", + ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), + }, "edge_host_uid": { Type: schema.TypeString, Required: true, @@ -45,90 +53,37 @@ func resourceClusterEdgeVsphere() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", }, - "cluster_profile": { - Type: schema.TypeList, - Optional: true, - ConflictsWith: []string{"pack"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - Default: "spectro", - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "tag": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeString, - Required: true, - }, - "manifest": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "content": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // UI strips the trailing newline on save - return strings.TrimSpace(old) == strings.TrimSpace(new) - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, + "cluster_profile": schemas.ClusterProfileSchema(), "cloud_config_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", + Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to apply OS patch on boot. 
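// getSSHKey, seen further down in this file, handles the mutually exclusive
// ssh_key / ssh_keys attributes declared above; it plausibly normalizes both
// into a single list (body assumed, only the signature appears in the hunk):

func getSSHKey(cloudConfig map[string]interface{}) []string {
	keys := make([]string, 0)
	if v, ok := cloudConfig["ssh_key"]; ok && v.(string) != "" {
		keys = append(keys, v.(string))
	}
	if v, ok := cloudConfig["ssh_keys"]; ok && v != nil {
		for _, k := range v.(*schema.Set).List() {
			keys = append(keys, k.(string))
		}
	}
	return keys
}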
Default is `false`.", }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, + Description: "Cron schedule for OS patching. This must be in the form of `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, + Description: "Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.", }, "cloud_config": { Type: schema.TypeList, @@ -140,7 +95,6 @@ func resourceClusterEdgeVsphere() *schema.Resource { "datacenter": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "folder": { Type: schema.TypeString, @@ -155,62 +109,36 @@ func resourceClusterEdgeVsphere() *schema.Resource { Type: schema.TypeString, Optional: true, ExactlyOneOf: []string{"cloud_config.0.ssh_key", "cloud_config.0.ssh_keys"}, + Description: "SSH Key (Secure Shell) to establish, administer, and communicate with remote clusters, `ssh_key & ssh_keys` are mutually exclusive.", }, "ssh_keys": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashString, + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + ExactlyOneOf: []string{"cloud_config.0.ssh_key", "cloud_config.0.ssh_keys"}, Elem: &schema.Schema{ Type: schema.TypeString, }, - ExactlyOneOf: []string{"cloud_config.0.ssh_key", "cloud_config.0.ssh_keys"}, + Description: "List of SSH (Secure Shell) to establish, administer, and communicate with remote clusters, `ssh_key & ssh_keys` are mutually exclusive.", }, "vip": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "static_ip": { Type: schema.TypeBool, Optional: true, Default: false, - ForceNew: true, }, "network_type": { Type: schema.TypeString, Optional: true, - ForceNew: true, }, "network_search_domain": { Type: schema.TypeString, Optional: true, - ForceNew: true, - }, - }, - }, - }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "tag": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeString, - Required: true, }, }, }, @@ -219,7 +147,7 @@ func resourceClusterEdgeVsphere() *schema.Resource { Type: schema.TypeList, Required: true, // disable hash to preserve machine pool order PE-255 - //Set: resourceMachinePoolVsphereHash, + // Set: resourceMachinePoolVsphereHash, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -233,44 +161,31 @@ func resourceClusterEdgeVsphere() *schema.Resource { Type: schema.TypeString, }, }, - "taints": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - "effect": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, + "taints": schemas.ClusterTaintsSchema(), "control_plane": { - Type: schema.TypeBool, - Optional: true, - Default: false, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether this machine pool is a control plane. 
Defaults to `false`.", }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, + //ForceNew: true, + Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "count": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "Number of nodes in the machine pool.", }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", + Description: "Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", + ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), }, "instance_type": { Type: schema.TypeList, @@ -328,137 +243,17 @@ func resourceClusterEdgeVsphere() *schema.Resource { }, }, }, - "backup_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "prefix": { - Type: schema.TypeString, - Required: true, - }, - "backup_location_id": { - Type: schema.TypeString, - Required: true, - }, - "schedule": { - Type: schema.TypeString, - Required: true, - }, - "expiry_in_hour": { - Type: schema.TypeInt, - Required: true, - }, - "include_disks": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "include_cluster_resources": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "namespaces": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "scan_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "configuration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "penetration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "conformance_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "cluster_rbac_binding": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - "role": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "subjects": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "namespaces": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "resource_allocation": { - Type: schema.TypeMap, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchema(), + "backup_policy": schemas.BackupPolicySchema(), + "scan_policy": schemas.ScanPolicySchema(), + "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), + "namespaces": schemas.ClusterNamespacesSchema(), + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": 
schemas.ClusterLocationSchema(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If `true`, the cluster will be created asynchronously. Default value is `false`.", }, }, } @@ -469,14 +264,18 @@ func resourceClusterEdgeVsphereCreate(ctx context.Context, d *schema.ResourceDat var diags diag.Diagnostics - cluster := toEdgeVsphereCluster(c, d) + cluster, err := toEdgeVsphereCluster(c, d) + if err != nil { + return diag.FromErr(err) + } - uid, err := c.CreateClusterEdgeVsphere(cluster) + ClusterContext := d.Get("context").(string) + uid, err := c.CreateClusterEdgeVsphere(cluster, ClusterContext) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) if isError { return diagnostics } @@ -491,12 +290,11 @@ func resourceClusterEdgeVsphereRead(_ context.Context, d *schema.ResourceData, m var diags diag.Diagnostics - uid := d.Id() - - cluster, err := c.GetCluster(uid) + cluster, err := resourceClusterRead(d, c, diags) if err != nil { return diag.FromErr(err) } else if cluster == nil { + // Deleted - Terraform will recreate it d.SetId("") return diags } @@ -513,7 +311,8 @@ func flattenCloudConfigEdgeVsphere(configUID string, d *schema.ResourceData, c * if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigVsphere(configUID); err != nil { + ClusterContext := d.Get("context").(string) + if config, err := c.GetCloudConfigVsphere(configUID, ClusterContext); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsEdgeVsphere(config.Spec.MachinePoolConfig) @@ -536,9 +335,8 @@ func flattenMachinePoolConfigsEdgeVsphere(machinePools []*models.V1VsphereMachin for _, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) - oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name oi["count"] = machinePool.Size @@ -584,7 +382,7 @@ func resourceClusterEdgeVsphereUpdate(ctx context.Context, d *schema.ResourceDat var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - + ClusterContext := d.Get("context").(string) if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") if oraw == nil { @@ -605,41 +403,46 @@ func resourceClusterEdgeVsphereUpdate(ctx context.Context, d *schema.ResourceDat for _, mp := range ns { machinePoolResource := mp.(map[string]interface{}) - name := machinePoolResource["name"].(string) - hash := resourceMachinePoolVsphereHash(machinePoolResource) - - machinePool := toMachinePoolEdgeVsphere(machinePoolResource) - - var err error - if oldMachinePool, ok := osMap[name]; !ok { - log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolVsphere(cloudConfigId, machinePool) - } else if hash != resourceMachinePoolVsphereHash(oldMachinePool) { - log.Printf("Change in machine pool %s", name) - oldMachinePool := toMachinePoolEdgeVsphere(oldMachinePool) - oldPlacements := oldMachinePool.CloudConfig.Placements - - for i, p := range machinePool.CloudConfig.Placements { - if len(oldPlacements) > i { - p.UID = oldPlacements[i].UID + // since 
known issue in TF SDK: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 + if machinePoolResource["name"].(string) != "" { + name := machinePoolResource["name"].(string) + hash := resourceMachinePoolVsphereHash(machinePoolResource) + var err error + machinePool, err := toMachinePoolEdgeVsphere(machinePoolResource) + if err != nil { + return diag.FromErr(err) + } + + if oldMachinePool, ok := osMap[name]; !ok { + log.Printf("Create machine pool %s", name) + err = c.CreateMachinePoolVsphere(cloudConfigId, ClusterContext, machinePool) + } else if hash != resourceMachinePoolVsphereHash(oldMachinePool) { + log.Printf("Change in machine pool %s", name) + oldMachinePool, _ := toMachinePoolEdgeVsphere(oldMachinePool) + oldPlacements := oldMachinePool.CloudConfig.Placements + + for i, p := range machinePool.CloudConfig.Placements { + if len(oldPlacements) > i { + p.UID = oldPlacements[i].UID + } } + + err = c.UpdateMachinePoolVsphere(cloudConfigId, ClusterContext, machinePool) } - err = c.UpdateMachinePoolVsphere(cloudConfigId, machinePool) - } + if err != nil { + return diag.FromErr(err) + } - if err != nil { - return diag.FromErr(err) + delete(osMap, name) } - - delete(osMap, name) } for _, mp := range osMap { machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolVsphere(cloudConfigId, name); err != nil { + if err := c.DeleteMachinePoolVsphere(cloudConfigId, name, ClusterContext); err != nil { return diag.FromErr(err) } } @@ -655,11 +458,15 @@ func resourceClusterEdgeVsphereUpdate(ctx context.Context, d *schema.ResourceDat return diags } -func toEdgeVsphereCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroVsphereClusterEntity { +func toEdgeVsphereCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroVsphereClusterEntity, error) { cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) vip := cloudConfig["vip"].(string) + profiles, err := toProfiles(c, d) + if err != nil { + return nil, err + } cluster := &models.V1SpectroVsphereClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -669,8 +476,7 @@ func toEdgeVsphereCluster(c *client.V1Client, d *schema.ResourceData) *models.V1 Spec: &models.V1SpectroVsphereClusterEntitySpec{ EdgeHostUID: d.Get("edge_host_uid").(string), - - Profiles: toProfiles(c, d), + Profiles: profiles, Policies: toPolicies(d), CloudConfig: getClusterConfigEntity(cloudConfig), }, @@ -684,7 +490,10 @@ func toEdgeVsphereCluster(c *client.V1Client, d *schema.ResourceData) *models.V1 machinePoolConfigs := make([]*models.V1VsphereMachinePoolConfigEntity, 0) for _, machinePool := range d.Get("machine_pool").([]interface{}) { - mp := toMachinePoolEdgeVsphere(machinePool) + mp, err := toMachinePoolEdgeVsphere(machinePool) + if err != nil { + return nil, err + } machinePoolConfigs = append(machinePoolConfigs, mp) } @@ -695,7 +504,7 @@ func toEdgeVsphereCluster(c *client.V1Client, d *schema.ResourceData) *models.V1 cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster + return cluster, nil } func getSSHKey(cloudConfig map[string]interface{}) []string { @@ -719,7 +528,7 @@ func getImageTemplateFolder(cloudConfig map[string]interface{}) string { func getClusterConfigEntity(cloudConfig map[string]interface{}) *models.V1VsphereClusterConfigEntity { clusterConfigEntity := &models.V1VsphereClusterConfigEntity{ - NtpServers: nil, + 
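// The UID carry-over above is what keeps vSphere placements stable across
// updates: placements are matched by position, and reusing the old UID tells
// the API to update the existing placement instead of creating a duplicate.
// The same logic as a small helper (placement type assumed to match the
// cluster-level entity used elsewhere in this file):

func carryOverPlacementUIDs(newPlacements, oldPlacements []*models.V1VspherePlacementConfigEntity) {
	for i, p := range newPlacements {
		if i < len(oldPlacements) {
			p.UID = oldPlacements[i].UID
		}
	}
}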
NtpServers: toNtpServers(cloudConfig), Placement: &models.V1VspherePlacementConfigEntity{ Datacenter: cloudConfig["datacenter"].(string), Folder: cloudConfig["folder"].(string), @@ -731,7 +540,7 @@ func getClusterConfigEntity(cloudConfig map[string]interface{}) *models.V1Vspher return clusterConfigEntity } -func toMachinePoolEdgeVsphere(machinePool interface{}) *models.V1VsphereMachinePoolConfigEntity { +func toMachinePoolEdgeVsphere(machinePool interface{}) (*models.V1VsphereMachinePoolConfigEntity, error) { m := machinePool.(map[string]interface{}) labels := make([]string, 0) @@ -789,5 +598,6 @@ func toMachinePoolEdgeVsphere(machinePool interface{}) *models.V1VsphereMachineP UseControlPlaneAsWorker: controlPlaneAsWorker, }, } - return mp + + return mp, nil } diff --git a/spectrocloud/resource_cluster_eks.go b/spectrocloud/resource_cluster_eks.go index 89a781e6..173d7eba 100644 --- a/spectrocloud/resource_cluster_eks.go +++ b/spectrocloud/resource_cluster_eks.go @@ -643,7 +643,7 @@ func flattenMachinePoolConfigsEks(machinePools []*models.V1EksMachinePoolConfig) for _, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) if *machinePool.IsControlPlane { continue diff --git a/spectrocloud/resource_cluster_gcp.go b/spectrocloud/resource_cluster_gcp.go index a154713f..caad58ce 100644 --- a/spectrocloud/resource_cluster_gcp.go +++ b/spectrocloud/resource_cluster_gcp.go @@ -2,16 +2,18 @@ package spectrocloud import ( "context" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" - "github.com/spectrocloud/terraform-provider-spectrocloud/types" "log" - "strings" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" + "github.com/spectrocloud/palette-sdk-go/client" ) func resourceClusterGcp() *schema.Resource { @@ -20,6 +22,7 @@ func resourceClusterGcp() *schema.Resource { ReadContext: resourceClusterGcpRead, UpdateContext: resourceClusterGcpUpdate, DeleteContext: resourceClusterDelete, + Description: "Resource for managing GCP clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -34,6 +37,12 @@ func resourceClusterGcp() *schema.Resource { Required: true, ForceNew: true, }, + "context": { + Type: schema.TypeString, + Optional: true, + Default: "project", + ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), + }, "tags": { Type: schema.TypeSet, Optional: true, @@ -41,69 +50,9 @@ func resourceClusterGcp() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "A list of tags to be applied to the cluster. 
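// toNtpServers replaces the hard-coded nil above; presumably it lifts an
// ntp_servers collection out of the cloud_config block (both the attribute
// name and the set type are assumptions):

func toNtpServers(in map[string]interface{}) []string {
	servers := make([]string, 0)
	if v, ok := in["ntp_servers"]; ok && v != nil {
		for _, s := range v.(*schema.Set).List() {
			servers = append(servers, s.(string))
		}
	}
	return servers
}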
Tags must be in the form of `key:value`.", }, - "cluster_profile": { - Type: schema.TypeList, - Optional: true, - ConflictsWith: []string{"pack"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - Default: "spectro", - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - }, - "values": { - Type: schema.TypeString, - Required: true, - }, - "manifest": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "content": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // UI strips the trailing newline on save - return strings.TrimSpace(old) == strings.TrimSpace(new) - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, + "cluster_profile": schemas.ClusterProfileSchema(), "apply_setting": { Type: schema.TypeString, Optional: true, @@ -114,26 +63,33 @@ func resourceClusterGcp() *schema.Resource { ForceNew: true, }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", + Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to apply OS patch on boot. Default is `false`.", }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, + Description: "Cron schedule for OS patching. This must be in the form of `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, + Description: "Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Kubeconfig for the cluster. 
This can be used to connect to the cluster using `kubectl`.", }, "cloud_config": { Type: schema.TypeList, @@ -157,30 +113,6 @@ func resourceClusterGcp() *schema.Resource { }, }, }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "tag": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, "machine_pool": { Type: schema.TypeSet, Required: true, @@ -194,38 +126,20 @@ func resourceClusterGcp() *schema.Resource { Type: schema.TypeString, }, }, - "taints": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - "effect": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, + "taints": schemas.ClusterTaintsSchema(), "control_plane": { Type: schema.TypeBool, Optional: true, Default: false, //ForceNew: true, + Description: "Whether this machine pool is a control plane. Defaults to `false`.", }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, - //ForceNew: true, + Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "name": { Type: schema.TypeString, @@ -233,17 +147,26 @@ func resourceClusterGcp() *schema.Resource { //ForceNew: true, }, "count": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "Number of nodes in the machine pool.", + }, + "node_repave_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools.", }, "instance_type": { Type: schema.TypeString, Required: true, }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", + Description: "Update strategy for the machine pool. 
Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", + ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), }, "disk_size_gb": { Type: schema.TypeInt, @@ -262,137 +185,17 @@ func resourceClusterGcp() *schema.Resource { }, }, }, - "backup_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "prefix": { - Type: schema.TypeString, - Required: true, - }, - "backup_location_id": { - Type: schema.TypeString, - Required: true, - }, - "schedule": { - Type: schema.TypeString, - Required: true, - }, - "expiry_in_hour": { - Type: schema.TypeInt, - Required: true, - }, - "include_disks": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "include_cluster_resources": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "namespaces": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "scan_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "configuration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "penetration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "conformance_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "cluster_rbac_binding": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - "role": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "subjects": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "namespaces": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "resource_allocation": { - Type: schema.TypeMap, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchemaComputed(), + "backup_policy": schemas.BackupPolicySchema(), + "scan_policy": schemas.ScanPolicySchema(), + "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), + "namespaces": schemas.ClusterNamespacesSchema(), + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchemaComputed(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If `true`, the cluster will be created asynchronously. 
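// getUpdateStrategy, used by every toMachinePool* expander in this patch,
// plausibly reduces to a defaulted lookup that matches the schema's
// RollingUpdateScaleOut default (sketch; exact body assumed):

func getUpdateStrategy(m map[string]interface{}) string {
	strategy := "RollingUpdateScaleOut"
	if v, ok := m["update_strategy"]; ok && v.(string) != "" {
		strategy = v.(string)
	}
	return strategy
}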
Default value is `false`.", }, }, } @@ -404,14 +207,18 @@ func resourceClusterGcpCreate(ctx context.Context, d *schema.ResourceData, m int // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster := toGcpCluster(c, d) + cluster, err := toGcpCluster(c, d) + if err != nil { + return diag.FromErr(err) + } - uid, err := c.CreateClusterGcp(cluster) + ClusterContext := d.Get("context").(string) + uid, err := c.CreateClusterGcp(cluster, ClusterContext) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) if isError { return diagnostics } @@ -427,9 +234,7 @@ func resourceClusterGcpRead(_ context.Context, d *schema.ResourceData, m interfa var diags diag.Diagnostics - uid := d.Id() - - cluster, err := c.GetCluster(uid) + cluster, err := resourceClusterRead(d, c, diags) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -447,10 +252,11 @@ func resourceClusterGcpRead(_ context.Context, d *schema.ResourceData, m interfa } func flattenCloudConfigGcp(configUID string, d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + ClusterContext := d.Get("context").(string) if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigGcp(configUID); err != nil { + if config, err := c.GetCloudConfigGcp(configUID, ClusterContext); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsGcp(config.Spec.MachinePoolConfig) @@ -473,9 +279,9 @@ func flattenMachinePoolConfigsGcp(machinePools []*models.V1GcpMachinePoolConfig) for i, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenControlPlaneAndRepaveInterval(machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) - oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name oi["count"] = int(machinePool.Size) @@ -486,7 +292,6 @@ func flattenMachinePoolConfigsGcp(machinePools []*models.V1GcpMachinePoolConfig) oi["disk_size_gb"] = int(machinePool.RootDeviceSize) oi["azs"] = machinePool.Azs - ois[i] = oi } @@ -500,7 +305,7 @@ func resourceClusterGcpUpdate(ctx context.Context, d *schema.ResourceData, m int var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - + ClusterContext := d.Get("context").(string) if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") if oraw == nil { @@ -521,26 +326,31 @@ func resourceClusterGcpUpdate(ctx context.Context, d *schema.ResourceData, m int for _, mp := range ns.List() { machinePoolResource := mp.(map[string]interface{}) - name := machinePoolResource["name"].(string) - hash := resourceMachinePoolGcpHash(machinePoolResource) - - machinePool := toMachinePoolGcp(machinePoolResource) - - var err error - if oldMachinePool, ok := osMap[name]; !ok { - log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolGcp(cloudConfigId, machinePool) - } else if hash != resourceMachinePoolGcpHash(oldMachinePool) { - log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolGcp(cloudConfigId, machinePool) + // since known issue in TF SDK: 
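// FlattenControlPlaneAndRepaveInterval comes from cluster_node_common.go,
// which this PR adds but which is not shown in this hunk. A plausible sketch,
// inferred from the call sites (the pointer signature is an assumption):
func flattenControlPlaneAndRepaveIntervalSketch(isControlPlane *bool, oi map[string]interface{}, nodeRepaveInterval int32) {
	if isControlPlane == nil {
		return
	}
	oi["control_plane"] = *isControlPlane
	// node_repave_interval applies to worker pools only, so it is written
	// back into state just for non-control-plane pools.
	if !*isControlPlane {
		oi["node_repave_interval"] = int(nodeRepaveInterval)
	}
}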
https://github.com/hashicorp/terraform-plugin-sdk/issues/588 + if machinePoolResource["name"].(string) != "" { + name := machinePoolResource["name"].(string) + hash := resourceMachinePoolGcpHash(machinePoolResource) + var err error + machinePool, err := toMachinePoolGcp(machinePoolResource) + if err != nil { + return diag.FromErr(err) + } + + if oldMachinePool, ok := osMap[name]; !ok { + log.Printf("Create machine pool %s", name) + err = c.CreateMachinePoolGcp(cloudConfigId, ClusterContext, machinePool) + } else if hash != resourceMachinePoolGcpHash(oldMachinePool) { + log.Printf("Change in machine pool %s", name) + err = c.UpdateMachinePoolGcp(cloudConfigId, ClusterContext, machinePool) + } + + if err != nil { + return diag.FromErr(err) + } + + // Processed (if exists) + delete(osMap, name) } - - if err != nil { - return diag.FromErr(err) - } - - // Processed (if exists) - delete(osMap, name) } // Deleted old machine pools @@ -548,15 +358,11 @@ func resourceClusterGcpUpdate(ctx context.Context, d *schema.ResourceData, m int machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolGcp(cloudConfigId, name); err != nil { + if err := c.DeleteMachinePoolGcp(cloudConfigId, name, ClusterContext); err != nil { return diag.FromErr(err) } } } - //TODO(saamalik) update for cluster as well - //if err := waitForClusterU(ctx, c, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - // return diag.FromErr(err) - //} diagnostics, done := updateCommonFields(d, c) if done { @@ -568,11 +374,15 @@ func resourceClusterGcpUpdate(ctx context.Context, d *schema.ResourceData, m int return diags } -func toGcpCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroGcpClusterEntity { +func toGcpCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroGcpClusterEntity, error) { // gnarly, I know! 
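// The create/update/delete loop above follows the same shape in every cluster
// resource this PR touches. Distilled into a generic sketch (helper names are
// illustrative, not from the provider):
func reconcileMachinePools(existing, desired []map[string]interface{},
	hash func(interface{}) int,
	create, update func(map[string]interface{}) error,
	remove func(name string) error) error {
	seen := make(map[string]map[string]interface{}, len(existing))
	for _, mp := range existing {
		seen[mp["name"].(string)] = mp
	}
	for _, mp := range desired {
		name, _ := mp["name"].(string)
		if name == "" {
			continue // skip phantom set entries, per TF SDK issue 588
		}
		var err error
		if oldMp, ok := seen[name]; !ok {
			err = create(mp) // pool is new
		} else if hash(mp) != hash(oldMp) {
			err = update(mp) // pool changed, per its hash
		}
		if err != nil {
			return err
		}
		delete(seen, name) // processed
	}
	for name := range seen {
		if err := remove(name); err != nil { // pool removed from config
			return err
		}
	}
	return nil
}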
=/ cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) //clientSecret := strfmt.Password(d.Get("gcp_client_secret").(string)) + profiles, err := toProfiles(c, d) + if err != nil { + return nil, err + } cluster := &models.V1SpectroGcpClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -581,7 +391,7 @@ func toGcpCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroG }, Spec: &models.V1SpectroGcpClusterEntitySpec{ CloudAccountUID: types.Ptr(d.Get("cloud_account_id").(string)), - Profiles: toProfiles(c, d), + Profiles: profiles, Policies: toPolicies(d), CloudConfig: &models.V1GcpClusterConfig{ Network: cloudConfig["network"].(string), @@ -593,17 +403,20 @@ func toGcpCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroG machinePoolConfigs := make([]*models.V1GcpMachinePoolConfigEntity, 0) for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { - mp := toMachinePoolGcp(machinePool) + mp, err := toMachinePoolGcp(machinePool) + if err != nil { + return nil, err + } machinePoolConfigs = append(machinePoolConfigs, mp) } cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster + return cluster, nil } -func toMachinePoolGcp(machinePool interface{}) *models.V1GcpMachinePoolConfigEntity { +func toMachinePoolGcp(machinePool interface{}) (*models.V1GcpMachinePoolConfigEntity, error) { m := machinePool.(map[string]interface{}) labels := make([]string, 0) @@ -637,5 +450,19 @@ func toMachinePoolGcp(machinePool interface{}) *models.V1GcpMachinePoolConfigEnt UseControlPlaneAsWorker: controlPlaneAsWorker, }, } - return mp + + if !controlPlane { + nodeRepaveInterval := 0 + if m["node_repave_interval"] != nil { + nodeRepaveInterval = m["node_repave_interval"].(int) + } + mp.PoolConfig.NodeRepaveInterval = int32(nodeRepaveInterval) + } else { + err := ValidationNodeRepaveIntervalForControlPlane(m["node_repave_interval"].(int)) + if err != nil { + return mp, err + } + } + + return mp, nil } diff --git a/spectrocloud/resource_cluster_libvirt.go b/spectrocloud/resource_cluster_libvirt.go index 942620d8..3e8f5d5a 100644 --- a/spectrocloud/resource_cluster_libvirt.go +++ b/spectrocloud/resource_cluster_libvirt.go @@ -637,9 +637,8 @@ func flattenMachinePoolConfigsLibvirt(machinePools []*models.V1LibvirtMachinePoo for _, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) - oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name oi["count"] = machinePool.Size @@ -949,6 +948,7 @@ func toMachinePoolLibvirt(machinePool interface{}) (*models.V1LibvirtMachinePool UseControlPlaneAsWorker: controlPlaneAsWorker, }, } + return mp, nil } diff --git a/spectrocloud/resource_cluster_maas.go b/spectrocloud/resource_cluster_maas.go index 597efec3..bd6532b2 100644 --- a/spectrocloud/resource_cluster_maas.go +++ b/spectrocloud/resource_cluster_maas.go @@ -2,16 +2,18 @@ package spectrocloud import ( "context" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" - "github.com/spectrocloud/terraform-provider-spectrocloud/types" "log" - "strings" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + 
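// ValidationNodeRepaveIntervalForControlPlane is defined in the new
// cluster_node_common.go, outside this hunk. Judging from its call sites, a
// sketch of the assumed check:
func validationNodeRepaveIntervalForControlPlaneSketch(nodeRepaveInterval int) error {
	if nodeRepaveInterval != 0 {
		// Control-plane pools are repaved by the platform itself, so a
		// user-supplied interval is rejected rather than silently dropped.
		return fmt.Errorf("node_repave_interval is invalid for control plane machine pools, got %d", nodeRepaveInterval)
	}
	return nil
}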
"github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" + "github.com/spectrocloud/palette-sdk-go/client" ) func resourceClusterMaas() *schema.Resource { @@ -20,6 +22,7 @@ func resourceClusterMaas() *schema.Resource { ReadContext: resourceClusterMaasRead, UpdateContext: resourceClusterMaasUpdate, DeleteContext: resourceClusterDelete, + Description: "Resource for managing MAAS clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -34,6 +37,12 @@ func resourceClusterMaas() *schema.Resource { Required: true, ForceNew: true, }, + "context": { + Type: schema.TypeString, + Optional: true, + Default: "project", + ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), + }, "tags": { Type: schema.TypeSet, Optional: true, @@ -41,68 +50,9 @@ func resourceClusterMaas() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", }, - "cluster_profile": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - Default: "spectro", - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - }, - "values": { - Type: schema.TypeString, - Required: true, - }, - "manifest": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "content": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // UI strips the trailing newline on save - return strings.TrimSpace(old) == strings.TrimSpace(new) - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, + "cluster_profile": schemas.ClusterProfileSchema(), "apply_setting": { Type: schema.TypeString, Optional: true, @@ -113,26 +63,33 @@ func resourceClusterMaas() *schema.Resource { ForceNew: true, }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", + Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to apply OS patch on boot. Default is `false`.", }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, + Description: "Cron schedule for OS patching. 
This must be in the form of `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, + Description: "The date and time after which to patch the cluster. Prefix the time value with the respective RFC. Ex: `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.", }, "cloud_config": { Type: schema.TypeList, @@ -148,30 +105,6 @@ func resourceClusterMaas() *schema.Resource { }, }, }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "tag": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, "machine_pool": { Type: schema.TypeSet, Required: true, @@ -185,38 +118,20 @@ func resourceClusterMaas() *schema.Resource { Type: schema.TypeString, }, }, - "taints": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - "effect": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, + "taints": schemas.ClusterTaintsSchema(), "control_plane": { Type: schema.TypeBool, Optional: true, Default: false, //ForceNew: true, + Description: "Whether this machine pool is a control plane. Defaults to `false`.", }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, - //ForceNew: true, + Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "name": { Type: schema.TypeString, @@ -224,8 +139,25 @@ func resourceClusterMaas() *schema.Resource { //ForceNew: true, }, "count": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "Number of nodes in the machine pool.", + }, + "node_repave_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools.", + }, + "min": { + Type: schema.TypeInt, + Optional: true, + Description: "Minimum number of nodes in the machine pool. This is used for autoscaling the machine pool.", + }, + "max": { + Type: schema.TypeInt, + Optional: true, + Description: "Maximum number of nodes in the machine pool. This is used for autoscaling the machine pool.", }, "instance_type": { Type: schema.TypeList, @@ -245,9 +177,11 @@ func resourceClusterMaas() *schema.Resource { }, }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", + Description: "Update strategy for the machine pool. 
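// The min and max attributes above bound MAAS machine-pool autoscaling, with
// count as the starting size. Nothing in the schema ties the three together;
// a plan-time consistency check along these lines could do so (hypothetical,
// not part of this PR):
func validatePoolScalingSketch(count, min, max int) error {
	if min == 0 && max == 0 {
		return nil // autoscaling not requested; count is authoritative
	}
	if min > max || count < min || count > max {
		return fmt.Errorf("expected min <= count <= max, got min=%d count=%d max=%d", min, count, max)
	}
	return nil
}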
Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", + ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), }, "azs": { Type: schema.TypeSet, @@ -277,137 +211,17 @@ func resourceClusterMaas() *schema.Resource { }, }, }, - "backup_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "prefix": { - Type: schema.TypeString, - Required: true, - }, - "backup_location_id": { - Type: schema.TypeString, - Required: true, - }, - "schedule": { - Type: schema.TypeString, - Required: true, - }, - "expiry_in_hour": { - Type: schema.TypeInt, - Required: true, - }, - "include_disks": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "include_cluster_resources": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "namespaces": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "scan_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "configuration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "penetration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "conformance_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "cluster_rbac_binding": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - "role": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "subjects": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "namespaces": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "resource_allocation": { - Type: schema.TypeMap, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchema(), + "backup_policy": schemas.BackupPolicySchema(), + "scan_policy": schemas.ScanPolicySchema(), + "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), + "namespaces": schemas.ClusterNamespacesSchema(), + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchema(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If `true`, the cluster will be created asynchronously. 
Default value is `false`.", }, }, } @@ -419,14 +233,18 @@ func resourceClusterMaasCreate(ctx context.Context, d *schema.ResourceData, m in // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster := toMaasCluster(c, d) + cluster, err := toMaasCluster(c, d) + if err != nil { + return diag.FromErr(err) + } - uid, err := c.CreateClusterMaas(cluster) + ClusterContext := d.Get("context").(string) + uid, err := c.CreateClusterMaas(cluster, ClusterContext) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) if isError { return diagnostics } @@ -441,10 +259,8 @@ func resourceClusterMaasRead(_ context.Context, d *schema.ResourceData, m interf c := m.(*client.V1Client) var diags diag.Diagnostics - // - uid := d.Id() - // - cluster, err := c.GetCluster(uid) + + cluster, err := resourceClusterRead(d, c, diags) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -462,11 +278,12 @@ func resourceClusterMaasRead(_ context.Context, d *schema.ResourceData, m interf } func flattenCloudConfigMaas(configUID string, d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + ClusterContext := d.Get("context").(string) err := d.Set("cloud_config_id", configUID) if err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigMaas(configUID); err != nil { + if config, err := c.GetCloudConfigMaas(configUID, ClusterContext); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsMaas(config.Spec.MachinePoolConfig) @@ -489,14 +306,16 @@ func flattenMachinePoolConfigsMaas(machinePools []*models.V1MaasMachinePoolConfi for i, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenControlPlaneAndRepaveInterval(&machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) - oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name oi["count"] = int(machinePool.Size) flattenUpdateStrategy(machinePool.UpdateStrategy, oi) + oi["min"] = int(machinePool.MinSize) + oi["max"] = int(machinePool.MaxSize) oi["instance_type"] = machinePool.InstanceType if machinePool.InstanceType != nil { @@ -522,7 +341,7 @@ func resourceClusterMaasUpdate(ctx context.Context, d *schema.ResourceData, m in var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - + ClusterContext := d.Get("context").(string) if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") if oraw == nil { @@ -543,26 +362,33 @@ func resourceClusterMaasUpdate(ctx context.Context, d *schema.ResourceData, m in for _, mp := range ns.List() { machinePoolResource := mp.(map[string]interface{}) - name := machinePoolResource["name"].(string) - hash := resourceMachinePoolMaasHash(machinePoolResource) - - machinePool := toMachinePoolMaas(machinePoolResource) - - var err error - if oldMachinePool, ok := osMap[name]; !ok { - log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolMaas(cloudConfigId, machinePool) - } else if hash != resourceMachinePoolMaasHash(oldMachinePool) { - log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolMaas(cloudConfigId, machinePool) - } - - if 
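// resourceClusterRead is a shared helper (in the common cluster files) that
// replaces the per-resource d.Id()/GetCluster boilerplate. A sketch of the
// assumed behavior — the argument order of GetCluster here is a guess:
func resourceClusterReadSketch(d *schema.ResourceData, c *client.V1Client) (*models.V1SpectroCluster, error) {
	clusterContext := d.Get("context").(string)
	cluster, err := c.GetCluster(clusterContext, d.Id())
	if err != nil {
		return nil, err
	}
	if cluster == nil {
		// Cluster no longer exists; clearing the ID makes Terraform plan
		// a re-create instead of failing the read.
		d.SetId("")
		return nil, nil
	}
	return cluster, nil
}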
err != nil { - return diag.FromErr(err) + // since known issue in TF SDK: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 + if machinePoolResource["name"].(string) != "" { + name := machinePoolResource["name"].(string) + hash := resourceMachinePoolMaasHash(machinePoolResource) + + var err error + machinePool, err := toMachinePoolMaas(machinePoolResource) + if err != nil { + return diag.FromErr(err) + } + + if oldMachinePool, ok := osMap[name]; !ok { + log.Printf("Create machine pool %s", name) + err = c.CreateMachinePoolMaas(cloudConfigId, ClusterContext, machinePool) + } else if hash != resourceMachinePoolMaasHash(oldMachinePool) { + log.Printf("Change in machine pool %s", name) + err = c.UpdateMachinePoolMaas(cloudConfigId, ClusterContext, machinePool) + } + + if err != nil { + return diag.FromErr(err) + } + + // Processed (if exists) + delete(osMap, name) } - // Processed (if exists) - delete(osMap, name) } // Deleted old machine pools @@ -570,15 +396,11 @@ func resourceClusterMaasUpdate(ctx context.Context, d *schema.ResourceData, m in machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolMaas(cloudConfigId, name); err != nil { + if err := c.DeleteMachinePoolMaas(cloudConfigId, name, ClusterContext); err != nil { return diag.FromErr(err) } } } - //TODO(saamalik) update for cluster as well - //if err := waitForClusterU(ctx, c, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - // return diag.FromErr(err) - //} diagnostics, done := updateCommonFields(d, c) if done { @@ -590,11 +412,15 @@ func resourceClusterMaasUpdate(ctx context.Context, d *schema.ResourceData, m in return diags } -func toMaasCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroMaasClusterEntity { +func toMaasCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroMaasClusterEntity, error) { // gnarly, I know! 
=/ cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) DomainVal := cloudConfig["domain"].(string) + profiles, err := toProfiles(c, d) + if err != nil { + return nil, err + } cluster := &models.V1SpectroMaasClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -603,7 +429,7 @@ func toMaasCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Spectro }, Spec: &models.V1SpectroMaasClusterEntitySpec{ CloudAccountUID: types.Ptr(d.Get("cloud_account_id").(string)), - Profiles: toProfiles(c, d), + Profiles: profiles, Policies: toPolicies(d), CloudConfig: &models.V1MaasClusterConfig{ Domain: &DomainVal, @@ -614,17 +440,20 @@ func toMaasCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Spectro //for _, machinePool := range d.Get("machine_pool").([]interface{}) { machinePoolConfigs := make([]*models.V1MaasMachinePoolConfigEntity, 0) for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { - mp := toMachinePoolMaas(machinePool) + mp, err := toMachinePoolMaas(machinePool) + if err != nil { + return nil, err + } machinePoolConfigs = append(machinePoolConfigs, mp) } cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster + return cluster, nil } -func toMachinePoolMaas(machinePool interface{}) *models.V1MaasMachinePoolConfigEntity { +func toMachinePoolMaas(machinePool interface{}) (*models.V1MaasMachinePoolConfigEntity, error) { m := machinePool.(map[string]interface{}) labels := make([]string, 0) @@ -642,6 +471,17 @@ func toMachinePoolMaas(machinePool interface{}) *models.V1MaasMachinePoolConfigE InstanceType := m["instance_type"].([]interface{})[0].(map[string]interface{}) Placement := m["placement"].([]interface{})[0].(map[string]interface{}) log.Printf("Create machine pool %s", InstanceType) + + min := int32(m["count"].(int)) + max := int32(m["count"].(int)) + + if m["min"] != nil { + min = int32(m["min"].(int)) + } + + if m["max"] != nil { + max = int32(m["max"].(int)) + } mp := &models.V1MaasMachinePoolConfigEntity{ CloudConfig: &models.V1MaasMachinePoolCloudConfigEntity{ Azs: azs, @@ -662,7 +502,23 @@ func toMachinePoolMaas(machinePool interface{}) *models.V1MaasMachinePoolConfigE Type: getUpdateStrategy(m), }, UseControlPlaneAsWorker: controlPlaneAsWorker, + MinSize: min, + MaxSize: max, }, } - return mp + + if !controlPlane { + nodeRepaveInterval := 0 + if m["node_repave_interval"] != nil { + nodeRepaveInterval = m["node_repave_interval"].(int) + } + mp.PoolConfig.NodeRepaveInterval = int32(nodeRepaveInterval) + } else { + err := ValidationNodeRepaveIntervalForControlPlane(m["node_repave_interval"].(int)) + if err != nil { + return mp, err + } + } + + return mp, nil } diff --git a/spectrocloud/resource_cluster_openstack.go b/spectrocloud/resource_cluster_openstack.go index f4db3313..1b2cfd51 100644 --- a/spectrocloud/resource_cluster_openstack.go +++ b/spectrocloud/resource_cluster_openstack.go @@ -2,17 +2,19 @@ package spectrocloud import ( "context" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" - "github.com/spectrocloud/terraform-provider-spectrocloud/types" "log" "sort" - "strings" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
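// A note on the min/max defaulting in toMachinePoolMaas above: for ints
// declared in the schema, m["min"] and m["max"] are never nil — Terraform
// materializes unset ints as 0 — so both nil guards always pass and an unset
// min/max overrides the count-based default with 0. A defensive variant that
// treats 0 as "not set" (an observation, not code from this PR):
func poolSizeBoundsSketch(m map[string]interface{}) (int32, int32) {
	count := int32(m["count"].(int))
	minSize, maxSize := count, count
	if v, ok := m["min"].(int); ok && v > 0 {
		minSize = int32(v)
	}
	if v, ok := m["max"].(int); ok && v > 0 {
		maxSize = int32(v)
	}
	return minSize, maxSize
}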
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" + "github.com/spectrocloud/palette-sdk-go/client" ) func resourceClusterOpenStack() *schema.Resource { @@ -21,6 +23,7 @@ func resourceClusterOpenStack() *schema.Resource { ReadContext: resourceClusterOpenStackRead, UpdateContext: resourceClusterOpenStackUpdate, DeleteContext: resourceClusterDelete, + Description: "Resource for managing Openstack clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(180 * time.Minute), @@ -34,6 +37,12 @@ func resourceClusterOpenStack() *schema.Resource { Required: true, ForceNew: true, }, + "context": { + Type: schema.TypeString, + Optional: true, + Default: "project", + ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), + }, "tags": { Type: schema.TypeSet, Optional: true, @@ -41,69 +50,9 @@ func resourceClusterOpenStack() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", }, - "cluster_profile": { - Type: schema.TypeList, - Optional: true, - ConflictsWith: []string{"pack"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - Default: "spectro", - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - }, - "values": { - Type: schema.TypeString, - Required: true, - }, - "manifest": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "content": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // UI strips the trailing newline on save - return strings.TrimSpace(old) == strings.TrimSpace(new) - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, + "cluster_profile": schemas.ClusterProfileSchema(), "apply_setting": { Type: schema.TypeString, Optional: true, @@ -114,26 +63,33 @@ func resourceClusterOpenStack() *schema.Resource { ForceNew: true, }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", + Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to apply OS patch on boot. Default is `false`.", }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, + Description: "Cron schedule for OS patching. This must be in the form of `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, + Description: "The date and time after which to patch the cluster. Prefix the time value with the respective RFC. 
Ex: `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.", }, "cloud_config": { Type: schema.TypeList, @@ -182,30 +138,6 @@ func resourceClusterOpenStack() *schema.Resource { }, }, }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "tag": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, "machine_pool": { Type: schema.TypeList, Required: true, @@ -218,48 +150,41 @@ func resourceClusterOpenStack() *schema.Resource { Type: schema.TypeString, }, }, - "taints": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - "effect": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, + "taints": schemas.ClusterTaintsSchema(), "control_plane": { - Type: schema.TypeBool, - Optional: true, - Default: false, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether this machine pool is a control plane. Defaults to `false`.", }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, + //ForceNew: true, + Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "name": { Type: schema.TypeString, Required: true, }, "count": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "Number of nodes in the machine pool.", + }, + "node_repave_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools.", }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", + Description: "Update strategy for the machine pool. 
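// validateOsPatchOnDemandAfter (referenced above, defined elsewhere in the
// provider) guards the os_patch_after timestamp. A minimal sketch of such a
// check, assuming RFC3339 input as the description suggests:
func validateOsPatchAfterSketch(v string) error {
	t, err := time.Parse(time.RFC3339, v)
	if err != nil {
		return fmt.Errorf("os_patch_after must use the RFC3339 layout 2006-01-02T15:04:05Z07:00: %w", err)
	}
	if t.Before(time.Now()) {
		// Whether a past timestamp is rejected or patches immediately is
		// provider policy; rejecting keeps the behavior explicit.
		return fmt.Errorf("os_patch_after %q is in the past", v)
	}
	return nil
}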
Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", + ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), }, "instance_type": { Type: schema.TypeString, @@ -279,137 +204,17 @@ func resourceClusterOpenStack() *schema.Resource { }, }, }, - "backup_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "prefix": { - Type: schema.TypeString, - Required: true, - }, - "backup_location_id": { - Type: schema.TypeString, - Required: true, - }, - "schedule": { - Type: schema.TypeString, - Required: true, - }, - "expiry_in_hour": { - Type: schema.TypeInt, - Required: true, - }, - "include_disks": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "include_cluster_resources": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "namespaces": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "scan_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "configuration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "penetration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "conformance_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "cluster_rbac_binding": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - "role": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "subjects": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "namespaces": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "resource_allocation": { - Type: schema.TypeMap, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchema(), + "backup_policy": schemas.BackupPolicySchema(), + "scan_policy": schemas.ScanPolicySchema(), + "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), + "namespaces": schemas.ClusterNamespacesSchema(), + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchema(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If `true`, the cluster will be created asynchronously. 
Default value is `false`.", }, }, } @@ -421,14 +226,18 @@ func resourceClusterOpenStackCreate(ctx context.Context, d *schema.ResourceData, // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster := toOpenStackCluster(c, d) + cluster, err := toOpenStackCluster(c, d) + if err != nil { + return diag.FromErr(err) + } - uid, err := c.CreateClusterOpenStack(cluster) + ClusterContext := d.Get("context").(string) + uid, err := c.CreateClusterOpenStack(cluster, ClusterContext) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) if isError { return diagnostics } @@ -438,10 +247,14 @@ func resourceClusterOpenStackCreate(ctx context.Context, d *schema.ResourceData, return diags } -func toOpenStackCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroOpenStackClusterEntity { +func toOpenStackCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroOpenStackClusterEntity, error) { cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) + profiles, err := toProfiles(c, d) + if err != nil { + return nil, err + } cluster := &models.V1SpectroOpenStackClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -450,7 +263,7 @@ func toOpenStackCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Sp }, Spec: &models.V1SpectroOpenStackClusterEntitySpec{ CloudAccountUID: types.Ptr(d.Get("cloud_account_id").(string)), - Profiles: toProfiles(c, d), + Profiles: profiles, Policies: toPolicies(d), CloudConfig: &models.V1OpenStackClusterConfig{ Region: cloudConfig["region"].(string), @@ -485,7 +298,10 @@ func toOpenStackCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Sp machinePoolConfigs := make([]*models.V1OpenStackMachinePoolConfigEntity, 0) for _, machinePool := range d.Get("machine_pool").([]interface{}) { - mp := toMachinePoolOpenStack(machinePool) + mp, err := toMachinePoolOpenStack(machinePool) + if err != nil { + return nil, err + } machinePoolConfigs = append(machinePoolConfigs, mp) } @@ -497,7 +313,7 @@ func toOpenStackCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Sp cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster + return cluster, nil } //goland:noinspection GoUnhandledErrorResult @@ -506,9 +322,7 @@ func resourceClusterOpenStackRead(_ context.Context, d *schema.ResourceData, m i var diags diag.Diagnostics - uid := d.Id() - - cluster, err := c.GetCluster(uid) + cluster, err := resourceClusterRead(d, c, diags) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -521,7 +335,8 @@ func resourceClusterOpenStackRead(_ context.Context, d *schema.ResourceData, m i if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigOpenStack(configUID); err != nil { + ClusterContext := d.Get("context").(string) + if config, err := c.GetCloudConfigOpenStack(configUID, ClusterContext); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsOpenStack(config.Spec.MachinePoolConfig) @@ -549,9 +364,9 @@ func flattenMachinePoolConfigsOpenStack(machinePools []*models.V1OpenStackMachin for _, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + 
FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenControlPlaneAndRepaveInterval(&machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) - oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name oi["count"] = int(machinePool.Size) @@ -574,7 +389,7 @@ func resourceClusterOpenStackUpdate(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - + ClusterContext := d.Get("context").(string) if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") if oraw == nil { @@ -595,26 +410,32 @@ func resourceClusterOpenStackUpdate(ctx context.Context, d *schema.ResourceData, for _, mp := range ns { machinePoolResource := mp.(map[string]interface{}) - name := machinePoolResource["name"].(string) - hash := resourceMachinePoolOpenStackHash(machinePoolResource) - - machinePool := toMachinePoolOpenStack(machinePoolResource) - - var err error - if oldMachinePool, ok := osMap[name]; !ok { - log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolOpenStack(cloudConfigId, machinePool) - } else if hash != resourceMachinePoolOpenStackHash(oldMachinePool) { - log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolOpenStack(cloudConfigId, machinePool) + // since known issue in TF SDK: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 + if machinePoolResource["name"].(string) != "" { + name := machinePoolResource["name"].(string) + hash := resourceMachinePoolOpenStackHash(machinePoolResource) + + var err error + machinePool, err := toMachinePoolOpenStack(machinePoolResource) + if err != nil { + return diag.FromErr(err) + } + + if oldMachinePool, ok := osMap[name]; !ok { + log.Printf("Create machine pool %s", name) + err = c.CreateMachinePoolOpenStack(cloudConfigId, ClusterContext, machinePool) + } else if hash != resourceMachinePoolOpenStackHash(oldMachinePool) { + log.Printf("Change in machine pool %s", name) + err = c.UpdateMachinePoolOpenStack(cloudConfigId, ClusterContext, machinePool) + } + + if err != nil { + return diag.FromErr(err) + } + + // Processed (if exists) + delete(osMap, name) } - - if err != nil { - return diag.FromErr(err) - } - - // Processed (if exists) - delete(osMap, name) } // Deleted old machine pools @@ -622,7 +443,7 @@ func resourceClusterOpenStackUpdate(ctx context.Context, d *schema.ResourceData, machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolOpenStack(cloudConfigId, name); err != nil { + if err := c.DeleteMachinePoolOpenStack(cloudConfigId, name, ClusterContext); err != nil { return diag.FromErr(err) } } @@ -638,7 +459,7 @@ func resourceClusterOpenStackUpdate(ctx context.Context, d *schema.ResourceData, return diags } -func toMachinePoolOpenStack(machinePool interface{}) *models.V1OpenStackMachinePoolConfigEntity { +func toMachinePoolOpenStack(machinePool interface{}) (*models.V1OpenStackMachinePoolConfigEntity, error) { m := machinePool.(map[string]interface{}) labels := make([]string, 0) @@ -676,5 +497,19 @@ func toMachinePoolOpenStack(machinePool interface{}) *models.V1OpenStackMachineP UseControlPlaneAsWorker: controlPlaneAsWorker, }, } - return mp + + if !controlPlane { + nodeRepaveInterval := 0 + if m["node_repave_interval"] != nil { + nodeRepaveInterval = m["node_repave_interval"].(int) + } + 
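// FlattenAdditionalLabelsAndTaints is the renamed SetAdditionalLabelsAndTaints
// (see the cluster_common_taints.go change in this PR). Its body is not in
// this hunk; a rough sketch of the assumed shape:
func flattenAdditionalLabelsAndTaintsSketch(labels map[string]string, taints []*models.V1Taint, oi map[string]interface{}) {
	additionalLabels := make(map[string]interface{}, len(labels))
	for k, v := range labels {
		additionalLabels[k] = v
	}
	oi["additional_labels"] = additionalLabels
	if len(taints) > 0 {
		flat := make([]interface{}, len(taints))
		for i, t := range taints {
			flat[i] = map[string]interface{}{
				"key":    t.Key,
				"value":  t.Value,
				"effect": t.Effect,
			}
		}
		oi["taints"] = flat
	}
}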
mp.PoolConfig.NodeRepaveInterval = int32(nodeRepaveInterval) + } else { + err := ValidationNodeRepaveIntervalForControlPlane(m["node_repave_interval"].(int)) + if err != nil { + return mp, err + } + } + + return mp, nil } diff --git a/spectrocloud/resource_cluster_tke.go b/spectrocloud/resource_cluster_tke.go index 9c81eb4b..38e34010 100644 --- a/spectrocloud/resource_cluster_tke.go +++ b/spectrocloud/resource_cluster_tke.go @@ -494,7 +494,7 @@ func flattenMachinePoolConfigsTke(machinePools []*models.V1TencentMachinePoolCon for _, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) if machinePool.IsControlPlane { continue diff --git a/spectrocloud/resource_cluster_vsphere.go b/spectrocloud/resource_cluster_vsphere.go index e4fc15a6..ba8eb33b 100644 --- a/spectrocloud/resource_cluster_vsphere.go +++ b/spectrocloud/resource_cluster_vsphere.go @@ -2,8 +2,8 @@ package spectrocloud import ( "context" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" - "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "errors" + "fmt" "log" "sort" "strings" @@ -11,8 +11,12 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" + "github.com/spectrocloud/palette-sdk-go/client" + + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" ) func resourceClusterVsphere() *schema.Resource { return &schema.Resource{ CreateContext: resourceClusterVsphereCreate, ReadContext: resourceClusterVsphereRead, UpdateContext: resourceClusterVsphereUpdate, DeleteContext: resourceClusterDelete, + Description: "A resource to manage a vSphere cluster in Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(180 * time.Minute), @@ -30,9 +35,16 @@ func resourceClusterVsphere() *schema.Resource { Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the cluster.", + }, + "context": { + Type: schema.TypeString, + Optional: true, + Default: "project", + ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), }, "tags": { Type: schema.TypeSet, Optional: true, @@ -41,104 +53,52 @@ func resourceClusterVsphere() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, + Description: "A list of tags to be applied to the cluster. 
Tags must be in the form of `key:value`.", }, - "cluster_profile_id": { - Type: schema.TypeString, - Optional: true, - Deprecated: "Switch to cluster_profile", - }, - "cluster_profile": { - Type: schema.TypeList, - Optional: true, - ConflictsWith: []string{"cluster_profile_id", "pack"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Required: true, - }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Optional: true, - Default: "spectro", - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "registry_uid": { - Type: schema.TypeString, - Optional: true, - }, - "tag": { - Type: schema.TypeString, - Optional: true, - }, - "values": { - Type: schema.TypeString, - Required: true, - }, - "manifest": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "content": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // UI strips the trailing newline on save - return strings.TrimSpace(old) == strings.TrimSpace(new) - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, + "cluster_profile": schemas.ClusterProfileSchema(), "apply_setting": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + Default: "DownloadAndInstall", + ValidateFunc: validation.StringInSlice([]string{"DownloadAndInstall", "DownloadAndInstallLater"}, false), + Description: "The setting to apply the cluster profile. `DownloadAndInstall` will download and install packs in one action. " + + "`DownloadAndInstallLater` will only download artifact and postpone install for later. " + + "Default value is `DownloadAndInstall`.", }, "cloud_account_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "ID of the cloud account to be used for the cluster. This cloud account must be of type `vsphere`.", }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", + Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to apply OS patch on boot. Default is `false`.", }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, + Description: "The cron schedule for OS patching. This must be in the form of cron syntax. Ex: `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, + Description: "The date and time after which to patch the cluster. Prefix the time value with the respective RFC. Ex: `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Description: "Kubeconfig for the cluster. 
This can be used to connect to the cluster using `kubectl`.", }, "cloud_config": { Type: schema.TypeList, @@ -148,61 +108,54 @@ func resourceClusterVsphere() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "datacenter": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "The name of the datacenter in vSphere. This is the name of the datacenter as it appears in vSphere.", }, "folder": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "The name of the folder in vSphere. This is the name of the folder as it appears in vSphere.", }, "image_template_folder": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + Description: "The name of the image template folder in vSphere. This is the name of the folder as it appears in vSphere.", }, "ssh_key": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "The SSH key to be used for the cluster. This is the public key that will be used to access the cluster.", }, "static_ip": { Type: schema.TypeBool, Optional: true, Default: false, + Description: "Whether to use static IP addresses for the cluster. If `true`, the cluster will use static IP addresses. " + + "If `false`, the cluster will use DDNS. Default is `false`.", }, // DHCP Properties "network_type": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + Description: "The type of network to use for the cluster. This can be `VIP` or `DDNS`.", }, "network_search_domain": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "pack": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Optional: true, + Description: "The search domain to use for the cluster in case of DHCP.", }, - "registry_uid": { - Type: schema.TypeString, + "ntp_servers": { + Type: schema.TypeSet, Optional: true, - }, - "tag": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeString, - Required: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "A list of NTP servers to be used by the cluster.", }, }, }, @@ -217,6 +170,7 @@ func resourceClusterVsphere() *schema.Resource { Type: schema.TypeString, Required: true, //ForceNew: true, + Description: "The name of the machine pool. This is used to identify the machine pool in the cluster.", }, "additional_labels": { Type: schema.TypeMap, @@ -225,47 +179,38 @@ func resourceClusterVsphere() *schema.Resource { Type: schema.TypeString, }, }, - "taints": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - "effect": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, + "taints": schemas.ClusterTaintsSchema(), "control_plane": { Type: schema.TypeBool, Optional: true, Default: false, //ForceNew: true, + Description: "Whether this machine pool is a control plane. Defaults to `false`.", }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, - //ForceNew: true, + Description: "Whether this machine pool is a control plane and a worker. 
Defaults to `false`.", + }, + "node_repave_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools.", }, "count": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "Number of nodes in the machine pool.", }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", + Description: "Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", + ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), }, "instance_type": { Type: schema.TypeList, @@ -274,16 +219,19 @@ func resourceClusterVsphere() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disk_size_gb": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "The size of the disk in GB.", }, "memory_mb": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "The amount of memory in MB.", }, "cpu": { - Type: schema.TypeInt, - Required: true, + Type: schema.TypeInt, + Required: true, + Description: "The number of CPUs.", }, }, }, @@ -298,24 +246,29 @@ func resourceClusterVsphere() *schema.Resource { Computed: true, }, "cluster": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "The name of the cluster to use for the machine pool. As it appears in the vSphere.", }, "resource_pool": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "The name of the resource pool to use for the machine pool. As it appears in the vSphere.", }, "datastore": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "The name of the datastore to use for the machine pool. As it appears in the vSphere.", }, "network": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Description: "The name of the network to use for the machine pool. 
As it appears in the vSphere.", }, "static_ip_pool_id": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + Description: "The ID of the static IP pool to use for the machine pool in case of static cluster placement.", }, }, }, @@ -323,137 +276,17 @@ func resourceClusterVsphere() *schema.Resource { }, }, }, - "backup_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "prefix": { - Type: schema.TypeString, - Required: true, - }, - "backup_location_id": { - Type: schema.TypeString, - Required: true, - }, - "schedule": { - Type: schema.TypeString, - Required: true, - }, - "expiry_in_hour": { - Type: schema.TypeInt, - Required: true, - }, - "include_disks": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "include_cluster_resources": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "namespaces": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "scan_policy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "configuration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "penetration_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - "conformance_scan_schedule": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "cluster_rbac_binding": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - "role": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "subjects": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "namespaces": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "resource_allocation": { - Type: schema.TypeMap, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchema(), + "backup_policy": schemas.BackupPolicySchema(), + "scan_policy": schemas.ScanPolicySchema(), + "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), + "namespaces": schemas.ClusterNamespacesSchema(), + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchema(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If `true`, the cluster will be created asynchronously. 
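// The placement block above is consumed by the vSphere expand path (outside
// this excerpt). A sketch of the field mapping using a hypothetical struct —
// the real code targets hapi's vSphere placement entity model:
type vspherePlacementSketch struct {
	Cluster, ResourcePool, Datastore, Network, StaticIPPoolID string
}

func toVspherePlacementSketchFn(p map[string]interface{}) vspherePlacementSketch {
	return vspherePlacementSketch{
		Cluster:        p["cluster"].(string),
		ResourcePool:   p["resource_pool"].(string),
		Datastore:      p["datastore"].(string),
		Network:        p["network"].(string),
		StaticIPPoolID: p["static_ip_pool_id"].(string),
	}
}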
Default value is `false`.", }, }, } @@ -465,14 +298,18 @@ func resourceClusterVsphereCreate(ctx context.Context, d *schema.ResourceData, m // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster := toVsphereCluster(c, d) + cluster, err := toVsphereCluster(c, d) + if err != nil { + return diag.FromErr(err) + } - uid, err := c.CreateClusterVsphere(cluster) + ClusterContext := d.Get("context").(string) + uid, err := c.CreateClusterVsphere(cluster, ClusterContext) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) if isError { return diagnostics } @@ -488,9 +325,7 @@ func resourceClusterVsphereRead(_ context.Context, d *schema.ResourceData, m int var diags diag.Diagnostics - uid := d.Id() - - cluster, err := c.GetCluster(uid) + cluster, err := resourceClusterRead(d, c, diags) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -503,7 +338,8 @@ func resourceClusterVsphereRead(_ context.Context, d *schema.ResourceData, m int if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigVsphere(configUID); err != nil { + ClusterContext := d.Get("context").(string) + if config, err := c.GetCloudConfigVsphere(configUID, ClusterContext); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsVsphere(config.Spec.MachinePoolConfig) @@ -521,13 +357,14 @@ func resourceClusterVsphereRead(_ context.Context, d *schema.ResourceData, m int } func flattenCloudConfigVsphere(configUID string, d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { + ClusterContext := d.Get("context").(string) if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigVsphere(configUID); err != nil { + if config, err := c.GetCloudConfigVsphere(configUID, ClusterContext); err != nil { return diag.FromErr(err) } else { - cloudConfig, err := c.GetVsphereClouldConfigValues(configUID) + cloudConfig, err := c.GetCloudConfigVsphereValues(configUID, ClusterContext) if err != nil { return diag.FromErr(err) } @@ -568,6 +405,10 @@ func flattenClusterConfigsVsphere(cloudConfig *models.V1VsphereCloudConfig) inte ret["network_search_domain"] = cpEndpoint.DdnsSearchDomain } + if cloudConfig.Spec.ClusterConfig.NtpServers != nil { + ret["ntp_servers"] = cloudConfig.Spec.ClusterConfig.NtpServers + } + cloudConfigFlatten = append(cloudConfigFlatten, ret) return cloudConfigFlatten @@ -584,9 +425,9 @@ func flattenMachinePoolConfigsVsphere(machinePools []*models.V1VsphereMachinePoo for i, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenControlPlaneAndRepaveInterval(machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) - oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name oi["count"] = machinePool.Size @@ -626,6 +467,80 @@ func flattenMachinePoolConfigsVsphere(machinePools []*models.V1VsphereMachinePoo return ois } +func sortPlacementStructs(structs []interface{}) { + sort.Slice(structs, func(i, j int) bool { + clusterI := structs[i].(map[string]interface{})["cluster"] + clusterJ 
:= structs[j].(map[string]interface{})["cluster"] + if clusterI != clusterJ { + return clusterI.(string) < clusterJ.(string) + } + datastoreI := structs[i].(map[string]interface{})["datastore"] + datastoreJ := structs[j].(map[string]interface{})["datastore"] + if datastoreI != datastoreJ { + return datastoreI.(string) < datastoreJ.(string) + } + resourcePoolI := structs[i].(map[string]interface{})["resource_pool"] + resourcePoolJ := structs[j].(map[string]interface{})["resource_pool"] + if resourcePoolI != resourcePoolJ { + return resourcePoolI.(string) < resourcePoolJ.(string) + } + networkI := structs[i].(map[string]interface{})["network"] + networkJ := structs[j].(map[string]interface{})["network"] + return networkI.(string) < networkJ.(string) + }) +} + +func ValidateMachinePoolChange(oMPool interface{}, nMPool interface{}) (bool, error) { + var oPlacements []interface{} + var nPlacements []interface{} + // Identifying control plane placements from machine pool interface before change + for i, oMachinePool := range oMPool.(*schema.Set).List() { + if oMachinePool.(map[string]interface{})["control_plane"] == true { + oPlacements = oMPool.(*schema.Set).List()[i].(map[string]interface{})["placement"].([]interface{}) + } + } + // Identifying control plane placements from machine pool interface after change + for _, nMachinePool := range nMPool.(*schema.Set).List() { + if nMachinePool.(map[string]interface{})["control_plane"] == true { + nPlacements = nMachinePool.(map[string]interface{})["placement"].([]interface{}) + } + } + // Validating any New or old placements got added/removed. + if len(nPlacements) != len(oPlacements) { + errMsg := `Placement validation error - Adding/Removing placement component in control plane is not allowed. +To update the placement configuration in the control plane, kindly recreate the cluster.` + return true, errors.New(errMsg) + } + + // Need to add sort with all fields + // oPlacements and nPlacements for correct comparison in case order was changed + sortPlacementStructs(oPlacements) + sortPlacementStructs(nPlacements) + + // Validating any New or old placements got changed. + for pIndex, nP := range nPlacements { + oPlacement := oPlacements[pIndex].(map[string]interface{}) + nPlacement := nP.(map[string]interface{}) + if oPlacement["cluster"] != nPlacement["cluster"] { + errMsg := fmt.Sprintf("Placement attributes for control_plane cannot be updated, validation error: Trying to update `ComputeCluster` value. Old value - %s, New value - %s ", oPlacement["cluster"], nPlacement["cluster"]) + return true, errors.New(errMsg) + } + if oPlacement["datastore"] != nPlacement["datastore"] { + errMsg := fmt.Sprintf("Placement attributes for control_plane cannot be updated, validation error: Trying to update `DataStore` value. Old value - %s, New value - %s ", oPlacement["datastore"], nPlacement["datastore"]) + return true, errors.New(errMsg) + } + if oPlacement["resource_pool"] != nPlacement["resource_pool"] { + errMsg := fmt.Sprintf("Placement attributes for control_plane cannot be updated, validation error: Trying to update `resource_pool` value. Old value - %s, New value - %s ", oPlacement["resource_pool"], nPlacement["resource_pool"]) + return true, errors.New(errMsg) + } + if oPlacement["network"] != nPlacement["network"] { + errMsg := fmt.Sprintf("Placement attributes for control_plane cannot be updated, validation error: Trying to update `Network` value. 
Old value - %s, New value - %s ", oPlacement["network"], nPlacement["network"]) + return true, errors.New(errMsg) + } + } + return false, nil +} + func resourceClusterVsphereUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { c := m.(*client.V1Client) @@ -633,14 +548,25 @@ func resourceClusterVsphereUpdate(ctx context.Context, d *schema.ResourceData, m var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - + ClusterContext := d.Get("context").(string) if d.HasChange("cloud_config") { + occ, ncc := d.GetChange("cloud_config") + if occ.([]interface{})[0].(map[string]interface{})["datacenter"] != ncc.([]interface{})[0].(map[string]interface{})["datacenter"] { + return diag.Errorf("Validation error: %s", "Datacenter value cannot be updated after cluster provisioning. Kindly destroy and recreate with updated Datacenter attribute.") + } cloudConfig := toCloudConfigUpdate(d.Get("cloud_config").([]interface{})[0].(map[string]interface{})) - c.UpdateVsphereCloudConfigValues(cloudConfigId, cloudConfig) + if err := c.UpdateCloudConfigVsphereValues(cloudConfigId, ClusterContext, cloudConfig); err != nil { + return diag.FromErr(err) + } } if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") + if oraw != nil && nraw != nil { + if ok, err := ValidateMachinePoolChange(oraw, nraw); ok { + return diag.Errorf(err.Error()) + } + } if oraw == nil { oraw = new(schema.Set) } @@ -659,36 +585,41 @@ func resourceClusterVsphereUpdate(ctx context.Context, d *schema.ResourceData, m for _, mp := range ns.List() { machinePoolResource := mp.(map[string]interface{}) - name := machinePoolResource["name"].(string) - hash := resourceMachinePoolVsphereHash(machinePoolResource) - - machinePool := toMachinePoolVsphere(machinePoolResource) - - var err error - if oldMachinePool, ok := osMap[name]; !ok { - log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolVsphere(cloudConfigId, machinePool) - } else if hash != resourceMachinePoolVsphereHash(oldMachinePool) { - log.Printf("Change in machine pool %s", name) - oldMachinePool := toMachinePoolVsphere(oldMachinePool) - oldPlacements := oldMachinePool.CloudConfig.Placements - - // set the placement ids - for i, p := range machinePool.CloudConfig.Placements { - if len(oldPlacements) > i { - p.UID = oldPlacements[i].UID + if machinePoolResource["name"].(string) != "" { + name := machinePoolResource["name"].(string) + hash := resourceMachinePoolVsphereHash(machinePoolResource) + + var err error + machinePool, err := toMachinePoolVsphere(machinePoolResource) + if err != nil { + return diag.FromErr(err) + } + + if oldMachinePool, ok := osMap[name]; !ok { + log.Printf("Create machine pool %s", name) + err = c.CreateMachinePoolVsphere(cloudConfigId, ClusterContext, machinePool) + } else if hash != resourceMachinePoolVsphereHash(oldMachinePool) { + log.Printf("Change in machine pool %s", name) + oldMachinePool, _ := toMachinePoolVsphere(oldMachinePool) + oldPlacements := oldMachinePool.CloudConfig.Placements + + // set the placement ids + for i, p := range machinePool.CloudConfig.Placements { + if len(oldPlacements) > i { + p.UID = oldPlacements[i].UID + } } + + err = c.UpdateMachinePoolVsphere(cloudConfigId, ClusterContext, machinePool) } - err = c.UpdateMachinePoolVsphere(cloudConfigId, machinePool) - } + if err != nil { + return diag.FromErr(err) + } - if err != nil { - return diag.FromErr(err) + // Processed (if exists) + delete(osMap, name) } - - // Processed (if exists) - delete(osMap, 
name) } // Deleted old machine pools @@ -696,15 +627,11 @@ func resourceClusterVsphereUpdate(ctx context.Context, d *schema.ResourceData, m machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolVsphere(cloudConfigId, name); err != nil { + if err := c.DeleteMachinePoolVsphere(cloudConfigId, name, ClusterContext); err != nil { return diag.FromErr(err) } } } - //TODO(saamalik) update for cluster as well - //if err := waitForClusterU(ctx, c, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - // return diag.FromErr(err) - //} diagnostics, done := updateCommonFields(d, c) if done { @@ -716,11 +643,14 @@ func resourceClusterVsphereUpdate(ctx context.Context, d *schema.ResourceData, m return diags } -func toVsphereCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroVsphereClusterEntity { - // gnarly, I know! =/ +func toVsphereCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroVsphereClusterEntity, error) { cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) //clientSecret := strfmt.Password(d.Get("azure_client_secret").(string)) + profiles, err := toProfiles(c, d) + if err != nil { + return nil, err + } cluster := &models.V1SpectroVsphereClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -729,7 +659,7 @@ func toVsphereCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Spec }, Spec: &models.V1SpectroVsphereClusterEntitySpec{ CloudAccountUID: d.Get("cloud_account_id").(string), - Profiles: toProfiles(c, d), + Profiles: profiles, Policies: toPolicies(d), CloudConfig: toCloudConfigCreate(cloudConfig), }, @@ -737,11 +667,13 @@ func toVsphereCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Spec machinePoolConfigs := make([]*models.V1VsphereMachinePoolConfigEntity, 0) for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { - mp := toMachinePoolVsphere(machinePool) + mp, err := toMachinePoolVsphere(machinePool) + if err != nil { + return nil, err + } machinePoolConfigs = append(machinePoolConfigs, mp) } - // sort sort.SliceStable(machinePoolConfigs, func(i, j int) bool { return machinePoolConfigs[i].PoolConfig.IsControlPlane }) @@ -749,7 +681,7 @@ func toVsphereCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Spec cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster + return cluster, nil } func toCloudConfigCreate(cloudConfig map[string]interface{}) *models.V1VsphereClusterConfigEntity { @@ -769,11 +701,11 @@ func toCloudConfigCreate(cloudConfig map[string]interface{}) *models.V1VsphereCl func toCloudConfigUpdate(cloudConfig map[string]interface{}) *models.V1VsphereCloudClusterConfigEntity { return &models.V1VsphereCloudClusterConfigEntity{ - toCloudConfigCreate(cloudConfig), + ClusterConfig: toCloudConfigCreate(cloudConfig), } } -func toMachinePoolVsphere(machinePool interface{}) *models.V1VsphereMachinePoolConfigEntity { +func toMachinePoolVsphere(machinePool interface{}) (*models.V1VsphereMachinePoolConfigEntity, error) { m := machinePool.(map[string]interface{}) labels := make([]string, 0) @@ -831,5 +763,19 @@ func toMachinePoolVsphere(machinePool interface{}) *models.V1VsphereMachinePoolC UseControlPlaneAsWorker: controlPlaneAsWorker, }, } - return mp + + if !controlPlane { + nodeRepaveInterval := 0 + if m["node_repave_interval"] != nil { + nodeRepaveInterval = 
m["node_repave_interval"].(int) + } + mp.PoolConfig.NodeRepaveInterval = int32(nodeRepaveInterval) + } else { + err := ValidationNodeRepaveIntervalForControlPlane(m["node_repave_interval"].(int)) + if err != nil { + return mp, err + } + } + + return mp, nil } diff --git a/templates/index.md.tmpl b/templates/index.md.tmpl index 56c4df61..0f544b85 100644 --- a/templates/index.md.tmpl +++ b/templates/index.md.tmpl @@ -33,7 +33,7 @@ Copy `terraform.template.tfvars` file to a `terraform.tfvars` file and modify it {{tffile "examples/provider/terraform.template.tfvars"}} -> -Be sure to populate the `username`, `password`, and other terraform vars. +Be sure to populate the `sc_host`, `sc_api_key`, and other terraform vars. Copy one of the resource configuration files (e.g: spectrocloud_cluster_profile) from the _Resources_ documentation. Be sure to specify all required parameters. @@ -51,28 +51,14 @@ For an end-to-end example of provisioning Spectro Cloud resources, visit: Credentials and other configurations can be provided through environment variables. The following environment variables are availabe. -- `SPECTROCLOUD_HOST` -- `SPECTROCLOUD_USERNAME` -- `SPECTROCLOUD_PASSWORD` +- `SPECTROCLOUD_HOST` - `SPECTROCLOUD_APIKEY` - `SPECTROCLOUD_TRACE` - `SPECTROCLOUD_RETRY_ATTEMPTS` ## Authentication -You can use the environment variables to authenticate with Spectro Cloud with your username and password. - --> **Note:** The API key takes precendence over the username and password authentication flow. - -```shell -export SPECTROCLOUD_USERNAME=myUserName -export SPECTROCLOUD_PASSWORD=myPassword -``` -```hcl -provider "spectrocloud" {} -``` - -Alternatively, you may use an API key to authenticate with Spectro Cloud. Visit the User Management API Key [documentation](https://docs.spectrocloud.com/user-management/user-authentication/#usingapikey) to learn more about Spectro Cloud API keys. +You can use an API key to authenticate with Spectro Cloud. Visit the User Management API Key [documentation](https://docs.spectrocloud.com/user-management/user-authentication/#usingapikey) to learn more about Spectro Cloud API keys. ```shell export SPECTROCLOUD_APIKEY=5b7aad......... ``` From 5d99a0604007a18d75d96bbb60e55a8760ef6fc4 Mon Sep 17 00:00:00 2001 From: nikolay-spectro Date: Fri, 15 Sep 2023 23:47:06 -0700 Subject: [PATCH 2/4] PLT-587: cherry picking node interval change from main. 
--- docs/resources/cloudaccount_maas.md | 17 +- docs/resources/cloudaccount_openstack.md | 29 +- docs/resources/cluster_aws.md | 120 ++-- docs/resources/cluster_azure.md | 148 ++--- docs/resources/cluster_maas.md | 115 ++-- docs/resources/cluster_openstack.md | 117 ++-- docs/resources/cluster_vsphere.md | 223 ++++--- go.mod | 59 +- go.sum | 535 ++++++++++++++++ spectrocloud/cluster_common.go | 21 +- spectrocloud/cluster_common_hash_test.go | 3 +- spectrocloud/cluster_node_common.go | 103 --- spectrocloud/resource_cluster_aws.go | 398 ++++++++---- spectrocloud/resource_cluster_azure.go | 442 +++++++++---- spectrocloud/resource_cluster_edge.go | 14 +- spectrocloud/resource_cluster_edge_native.go | 8 +- spectrocloud/resource_cluster_edge_vsphere.go | 364 ++++++++--- spectrocloud/resource_cluster_eks.go | 21 +- spectrocloud/resource_cluster_gcp.go | 339 ++++++++-- spectrocloud/resource_cluster_maas.go | 351 +++++++--- spectrocloud/resource_cluster_openstack.go | 346 +++++++--- spectrocloud/resource_cluster_vsphere.go | 599 ++++++++++-------- 22 files changed, 3074 insertions(+), 1298 deletions(-) create mode 100644 go.sum diff --git a/docs/resources/cloudaccount_maas.md b/docs/resources/cloudaccount_maas.md index 42bf6f6a..252428bf 100644 --- a/docs/resources/cloudaccount_maas.md +++ b/docs/resources/cloudaccount_maas.md @@ -1,4 +1,5 @@ --- +# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cloudaccount_maas Resource - terraform-provider-spectrocloud" subcategory: "" description: |- @@ -7,7 +8,7 @@ description: |- # spectrocloud_cloudaccount_maas (Resource) - + ## Example Usage @@ -19,21 +20,21 @@ resource "spectrocloud_cloudaccount_maas" "maas-1" { } ``` - ## Schema ### Required -- `maas_api_endpoint` (String) Endpoint of the MAAS API that is used to connect to the MAAS cloud. I.e. http://maas:5240/MAAS -- `maas_api_key` (String, Sensitive) API key that is used to connect to the MAAS cloud. -- `name` (String) Name of the MAAS cloud account. -- `private_cloud_gateway_id` (String) ID of the private cloud gateway that is used to connect to the MAAS cloud. +- `name` (String) ### Optional -- `context` (String) The context of the MAAS configuration. Can be `project` or `tenant`. +- `maas_api_endpoint` (String) +- `maas_api_key` (String, Sensitive) +- `private_cloud_gateway_id` (String) ### Read-Only -- `id` (String) The ID of this resource. \ No newline at end of file +- `id` (String) The ID of this resource. + + diff --git a/docs/resources/cloudaccount_openstack.md b/docs/resources/cloudaccount_openstack.md index eeee66b6..40a21424 100644 --- a/docs/resources/cloudaccount_openstack.md +++ b/docs/resources/cloudaccount_openstack.md @@ -1,4 +1,5 @@ --- +# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cloudaccount_openstack Resource - terraform-provider-spectrocloud" subcategory: "" description: |- @@ -7,7 +8,7 @@ description: |- # spectrocloud_cloudaccount_openstack (Resource) - + ## Example Usage @@ -24,27 +25,27 @@ resource "spectrocloud_cloudaccount_openstack" "account" { } ``` - ## Schema ### Required -- `default_domain` (String) The default domain of the OpenStack cloud that is used to connect to the OpenStack cloud. -- `default_project` (String) The default project of the OpenStack cloud that is used to connect to the OpenStack cloud. -- `identity_endpoint` (String) The identity endpoint of the OpenStack cloud that is used to connect to the OpenStack cloud. 
-- `name` (String) Name of the OpenStack cloud account. -- `openstack_password` (String, Sensitive) The password of the OpenStack cloud that is used to connect to the OpenStack cloud. -- `openstack_username` (String) The username of the OpenStack cloud that is used to connect to the OpenStack cloud. -- `parent_region` (String) The parent region of the OpenStack cloud that is used to connect to the OpenStack cloud. -- `private_cloud_gateway_id` (String) ID of the private cloud gateway that is used to connect to the OpenStack cloud. +- `default_domain` (String) +- `default_project` (String) +- `identity_endpoint` (String) +- `name` (String) +- `openstack_password` (String, Sensitive) +- `openstack_username` (String) +- `parent_region` (String) +- `private_cloud_gateway_id` (String) ### Optional -- `ca_certificate` (String) The CA certificate of the OpenStack cloud that is used to connect to the OpenStack cloud. -- `context` (String) The context of the OpenStack configuration. Can be `project` or `tenant`. -- `openstack_allow_insecure` (Boolean) Whether to allow insecure connections to the OpenStack cloud. Default is `false`. +- `ca_certificate` (String) +- `openstack_allow_insecure` (Boolean) ### Read-Only -- `id` (String) The ID of this resource. \ No newline at end of file +- `id` (String) The ID of this resource. + + diff --git a/docs/resources/cluster_aws.md b/docs/resources/cluster_aws.md index 972cd950..f60e4855 100644 --- a/docs/resources/cluster_aws.md +++ b/docs/resources/cluster_aws.md @@ -1,16 +1,16 @@ --- +# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cluster_aws Resource - terraform-provider-spectrocloud" subcategory: "" description: |- - Resource for managing AWS clusters in Spectro Cloud through Palette. + --- # spectrocloud_cluster_aws (Resource) - Resource for managing AWS clusters in Spectro Cloud through Palette. -## Example Usage +## Example Usage ```terraform data "spectrocloud_cloudaccount_aws" "account" { @@ -119,7 +119,6 @@ resource "spectrocloud_cluster_aws" "cluster" { } ``` - ## Schema @@ -135,23 +134,24 @@ resource "spectrocloud_cluster_aws" "cluster" { - `apply_setting` (String) - `backup_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--backup_policy)) - `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) +- `cluster_profile_id` (String, Deprecated) - `cluster_rbac_binding` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding)) -- `context` (String) - `host_config` (Block List) (see [below for nested schema](#nestedblock--host_config)) - `namespaces` (Block List) (see [below for nested schema](#nestedblock--namespaces)) -- `os_patch_after` (String) Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00` -- `os_patch_on_boot` (Boolean) Whether to apply OS patch on boot. Default is `false`. -- `os_patch_schedule` (String) The cron schedule for OS patching. This must be in the form of cron syntax. Ex: `0 0 * * *`. +- `os_patch_after` (String) +- `os_patch_on_boot` (Boolean) +- `os_patch_schedule` (String) +- `pack` (Block List) (see [below for nested schema](#nestedblock--pack)) - `scan_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy)) -- `skip_completion` (Boolean) If `true`, the cluster will be created asynchronously. Default value is `false`. -- `tags` (Set of String) A list of tags to be applied to the cluster. Tags must be in the form of `key:value`. 
+- `skip_completion` (Boolean) +- `tags` (Set of String) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only -- `cloud_config_id` (String, Deprecated) ID of the cloud config used for the cluster. This cloud config must be of type `azure`. +- `cloud_config_id` (String) - `id` (String) The ID of this resource. -- `kubeconfig` (String) Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`. +- `kubeconfig` (String) - `location_config` (List of Object) (see [below for nested schema](#nestedatt--location_config)) @@ -172,35 +172,34 @@ Optional: Required: -- `count` (Number) Number of nodes in the machine pool. +- `count` (Number) - `instance_type` (String) - `name` (String) Optional: - `additional_labels` (Map of String) -- `additional_security_groups` (Set of String) Additional security groups to attach to the instance. - `az_subnets` (Map of String) Mutually exclusive with `azs`. Use `az_subnets` for Static provisioning. - `azs` (Set of String) Mutually exclusive with `az_subnets`. Use `azs` for Dynamic provisioning. - `capacity_type` (String) Capacity type is an instance type, can be 'on-demand' or 'spot'. Defaults to 'on-demand'. -- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`. -- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`. +- `control_plane` (Boolean) +- `control_plane_as_worker` (Boolean) - `disk_size_gb` (Number) - `max` (Number) Maximum number of nodes in the machine pool. This is used for autoscaling the machine pool. - `max_price` (String) - `min` (Number) Minimum number of nodes in the machine pool. This is used for autoscaling the machine pool. - `node_repave_interval` (Number) Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools. - `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints)) -- `update_strategy` (String) Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`. +- `update_strategy` (String) ### Nested Schema for `machine_pool.taints` Required: -- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`. -- `key` (String) The key of the taint. -- `value` (String) The value of the taint. +- `effect` (String) +- `key` (String) +- `value` (String) @@ -209,58 +208,51 @@ Required: Required: -- `backup_location_id` (String) The ID of the backup location to use for the backup. -- `expiry_in_hour` (Number) The number of hours after which the backup will be deleted. For example, if the expiry is set to 24, the backup will be deleted after 24 hours. -- `prefix` (String) Prefix for the backup name. The backup name will be of the format --. -- `schedule` (String) The schedule for the backup. The schedule is specified in cron format. For example, to run the backup every day at 1:00 AM, the schedule should be set to `0 1 * * *`. +- `backup_location_id` (String) +- `expiry_in_hour` (Number) +- `prefix` (String) +- `schedule` (String) Optional: -- `cluster_uids` (Set of String) The list of cluster UIDs to include in the backup. If `include_all_clusters` is set to `true`, then all clusters will be included. -- `include_all_clusters` (Boolean) Whether to include all clusters in the backup. 
If set to false, only the clusters specified in `cluster_uids` will be included. -- `include_cluster_resources` (Boolean) Whether to include the cluster resources in the backup. If set to false, only the cluster configuration and disks will be backed up. -- `include_disks` (Boolean) Whether to include the disks in the backup. If set to false, only the cluster configuration will be backed up. -- `namespaces` (Set of String) The list of Kubernetes namespaces to include in the backup. If not specified, all namespaces will be included. +- `include_cluster_resources` (Boolean) +- `include_disks` (Boolean) +- `namespaces` (Set of String) ### Nested Schema for `cluster_profile` -Required: - -- `id` (String) The ID of the cluster profile. - Optional: - `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) +Read-Only: + +- `id` (String) The ID of this resource. + ### Nested Schema for `cluster_profile.pack` Required: -- `name` (String) The name of the pack. The name must be unique within the cluster profile. +- `name` (String) +- `values` (String) Optional: - `manifest` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack--manifest)) -- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry. -- `tag` (String) The tag of the pack. The tag is the version of the pack. -- `type` (String) The type of the pack. The default value is `spectro`. -- `uid` (String) -- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format. +- `registry_uid` (String) +- `tag` (String) +- `type` (String) ### Nested Schema for `cluster_profile.pack.manifest` Required: -- `content` (String) The content of the manifest. The content is the YAML content of the manifest. -- `name` (String) The name of the manifest. The name must be unique within the pack. - -Read-Only: - -- `uid` (String) +- `content` (String) +- `name` (String) @@ -270,12 +262,12 @@ Read-Only: Required: -- `type` (String) The type of the RBAC binding. Can be one of the following values: `RoleBinding`, or `ClusterRoleBinding`. +- `type` (String) Optional: -- `namespace` (String) The Kubernetes namespace of the RBAC binding. Required if 'type' is set to 'RoleBinding'. -- `role` (Map of String) The role of the RBAC binding. Required if 'type' is set to 'RoleBinding'. +- `namespace` (String) +- `role` (Map of String) - `subjects` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding--subjects)) @@ -283,12 +275,12 @@ Optional: Required: -- `name` (String) The name of the subject. Required if 'type' is set to 'User' or 'Group'. -- `type` (String) The type of the subject. Can be one of the following values: `User`, `Group`, or `ServiceAccount`. +- `name` (String) +- `type` (String) Optional: -- `namespace` (String) The Kubernetes namespace of the subject. Required if 'type' is set to 'ServiceAccount'. +- `namespace` (String) @@ -308,12 +300,22 @@ Optional: Required: -- `name` (String) Name of the namespace. This is the name of the Kubernetes namespace in the cluster. -- `resource_allocation` (Map of String) Resource allocation for the namespace. This is a map containing the resource type and the resource value. 
For example, `{cpu_cores: '2', memory_MiB: '2048'}` +- `name` (String) +- `resource_allocation` (Map of String) + + + +### Nested Schema for `pack` + +Required: + +- `name` (String) +- `tag` (String) +- `values` (String) Optional: -- `images_blacklist` (List of String) List of images to disallow for the namespace. For example, `['nginx:latest', 'redis:latest']` +- `registry_uid` (String) @@ -321,9 +323,9 @@ Optional: Required: -- `configuration_scan_schedule` (String) The schedule for configuration scan. -- `conformance_scan_schedule` (String) The schedule for conformance scan. -- `penetration_scan_schedule` (String) The schedule for penetration scan. +- `configuration_scan_schedule` (String) +- `conformance_scan_schedule` (String) +- `penetration_scan_schedule` (String) @@ -346,4 +348,6 @@ Read-Only: - `latitude` (Number) - `longitude` (Number) - `region_code` (String) -- `region_name` (String) \ No newline at end of file +- `region_name` (String) + + diff --git a/docs/resources/cluster_azure.md b/docs/resources/cluster_azure.md index 10977328..eafee6c4 100644 --- a/docs/resources/cluster_azure.md +++ b/docs/resources/cluster_azure.md @@ -1,13 +1,14 @@ --- +# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cluster_azure Resource - terraform-provider-spectrocloud" subcategory: "" description: |- - Resource for managing Azure clusters in Spectro Cloud through Palette. + --- # spectrocloud_cluster_azure (Resource) - Resource for managing Azure clusters in Spectro Cloud through Palette. + ## Example Usage @@ -81,40 +82,38 @@ resource "spectrocloud_cluster_azure" "cluster" { } ``` - - ## Schema ### Required -- `cloud_account_id` (String) ID of the cloud account to be used for the cluster. This cloud account must be of type `azure`. +- `cloud_account_id` (String) - `cloud_config` (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--cloud_config)) - `machine_pool` (Block Set, Min: 1) (see [below for nested schema](#nestedblock--machine_pool)) -- `name` (String) Name of the cluster. This name will be used to create the cluster in Azure. +- `name` (String) ### Optional -- `apply_setting` (String) Apply setting for the cluster. This can be set to `on_create` or `on_update`. +- `apply_setting` (String) - `backup_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--backup_policy)) - `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) - `cluster_rbac_binding` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding)) -- `context` (String) - `host_config` (Block List) (see [below for nested schema](#nestedblock--host_config)) - `namespaces` (Block List) (see [below for nested schema](#nestedblock--namespaces)) -- `os_patch_after` (String) Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00` -- `os_patch_on_boot` (Boolean) Whether to apply OS patch on boot. Default is `false`. -- `os_patch_schedule` (String) Cron schedule for OS patching. This must be in the form of `0 0 * * *`. +- `os_patch_after` (String) +- `os_patch_on_boot` (Boolean) +- `os_patch_schedule` (String) +- `pack` (Block List) (see [below for nested schema](#nestedblock--pack)) - `scan_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy)) -- `skip_completion` (Boolean) If `true`, the cluster will be created asynchronously. Default value is `false`. -- `tags` (Set of String) A list of tags to be applied to the cluster. 
Tags must be in the form of `key:value`. +- `skip_completion` (Boolean) +- `tags` (Set of String) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only -- `cloud_config_id` (String, Deprecated) ID of the cloud config used for the cluster. This cloud config must be of type `azure`. +- `cloud_config_id` (String) - `id` (String) The ID of this resource. -- `kubeconfig` (String) Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`. +- `kubeconfig` (String) - `location_config` (List of Object) (see [below for nested schema](#nestedatt--location_config)) @@ -122,10 +121,10 @@ resource "spectrocloud_cluster_azure" "cluster" { Required: -- `region` (String) Azure region. This can be found in the Azure portal under `Resource groups`. -- `resource_group` (String) Azure resource group. This can be found in the Azure portal under `Resource groups`. -- `ssh_key` (String) SSH key to be used for the cluster nodes. -- `subscription_id` (String) Azure subscription ID. This can be found in the Azure portal under `Subscriptions`. +- `region` (String) +- `resource_group` (String) +- `ssh_key` (String) +- `subscription_id` (String) @@ -133,30 +132,30 @@ Required: Required: -- `azs` (Set of String) Availability zones for the machine pool. -- `count` (Number) Number of nodes in the machine pool. -- `instance_type` (String) Azure instance type from the Azure portal. -- `name` (String) Name of the machine pool. This must be unique within the cluster. +- `azs` (Set of String) +- `count` (Number) +- `instance_type` (String) +- `is_system_node_pool` (Boolean) +- `name` (String) Optional: - `additional_labels` (Map of String) -- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`. -- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`. -- `disk` (Block List, Max: 1) Disk configuration for the machine pool. (see [below for nested schema](#nestedblock--machine_pool--disk)) -- `is_system_node_pool` (Boolean) Whether this machine pool is a system node pool. Default value is `false'. +- `control_plane` (Boolean) +- `control_plane_as_worker` (Boolean) +- `disk` (Block List, Max: 1) (see [below for nested schema](#nestedblock--machine_pool--disk)) - `node_repave_interval` (Number) Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools. -- `os_type` (String) Operating system type for the machine pool. Valid values are `Linux` and `Windows`. Defaults to `Linux`. +- `os_type` (String) - `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints)) -- `update_strategy` (String) Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`. +- `update_strategy` (String) ### Nested Schema for `machine_pool.disk` Required: -- `size_gb` (Number) Size of the disk in GB. -- `type` (String) Type of the disk. Valid values are `Standard_LRS`, `StandardSSD_LRS`, `Premium_LRS`. +- `size_gb` (Number) +- `type` (String) @@ -164,9 +163,9 @@ Required: Required: -- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`. -- `key` (String) The key of the taint. -- `value` (String) The value of the taint. 
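A short sketch of the taint block documented above, as it might appear inside an Azure machine pool; the pool size, key, and value are illustrative assumptions.

```terraform
machine_pool {
  name                = "worker-tainted"
  count               = 1
  instance_type       = "Standard_D2s_v3" # illustrative Azure size
  is_system_node_pool = false
  azs                 = ["1"]

  taints {
    key    = "dedicated"  # illustrative
    value  = "backend"    # illustrative
    effect = "NoSchedule" # NoSchedule, PreferNoSchedule, or NoExecute
  }
}
```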
+- `effect` (String) +- `key` (String) +- `value` (String) @@ -175,58 +174,51 @@ Required: Required: -- `backup_location_id` (String) The ID of the backup location to use for the backup. -- `expiry_in_hour` (Number) The number of hours after which the backup will be deleted. For example, if the expiry is set to 24, the backup will be deleted after 24 hours. -- `prefix` (String) Prefix for the backup name. The backup name will be of the format --. -- `schedule` (String) The schedule for the backup. The schedule is specified in cron format. For example, to run the backup every day at 1:00 AM, the schedule should be set to `0 1 * * *`. +- `backup_location_id` (String) +- `expiry_in_hour` (Number) +- `prefix` (String) +- `schedule` (String) Optional: -- `cluster_uids` (Set of String) The list of cluster UIDs to include in the backup. If `include_all_clusters` is set to `true`, then all clusters will be included. -- `include_all_clusters` (Boolean) Whether to include all clusters in the backup. If set to false, only the clusters specified in `cluster_uids` will be included. -- `include_cluster_resources` (Boolean) Whether to include the cluster resources in the backup. If set to false, only the cluster configuration and disks will be backed up. -- `include_disks` (Boolean) Whether to include the disks in the backup. If set to false, only the cluster configuration will be backed up. -- `namespaces` (Set of String) The list of Kubernetes namespaces to include in the backup. If not specified, all namespaces will be included. +- `include_cluster_resources` (Boolean) +- `include_disks` (Boolean) +- `namespaces` (Set of String) ### Nested Schema for `cluster_profile` -Required: - -- `id` (String) The ID of the cluster profile. - Optional: - `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) +Read-Only: + +- `id` (String) The ID of this resource. + ### Nested Schema for `cluster_profile.pack` Required: -- `name` (String) The name of the pack. The name must be unique within the cluster profile. +- `name` (String) +- `values` (String) Optional: - `manifest` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack--manifest)) -- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry. -- `tag` (String) The tag of the pack. The tag is the version of the pack. -- `type` (String) The type of the pack. The default value is `spectro`. -- `uid` (String) -- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format. +- `registry_uid` (String) +- `tag` (String) +- `type` (String) ### Nested Schema for `cluster_profile.pack.manifest` Required: -- `content` (String) The content of the manifest. The content is the YAML content of the manifest. -- `name` (String) The name of the manifest. The name must be unique within the pack. - -Read-Only: - -- `uid` (String) +- `content` (String) +- `name` (String) @@ -236,12 +228,12 @@ Read-Only: Required: -- `type` (String) The type of the RBAC binding. Can be one of the following values: `RoleBinding`, or `ClusterRoleBinding`. +- `type` (String) Optional: -- `namespace` (String) The Kubernetes namespace of the RBAC binding. Required if 'type' is set to 'RoleBinding'. -- `role` (Map of String) The role of the RBAC binding. Required if 'type' is set to 'RoleBinding'. 
+- `namespace` (String) +- `role` (Map of String) - `subjects` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding--subjects)) @@ -249,12 +241,12 @@ Optional: Required: -- `name` (String) The name of the subject. Required if 'type' is set to 'User' or 'Group'. -- `type` (String) The type of the subject. Can be one of the following values: `User`, `Group`, or `ServiceAccount`. +- `name` (String) +- `type` (String) Optional: -- `namespace` (String) The Kubernetes namespace of the subject. Required if 'type' is set to 'ServiceAccount'. +- `namespace` (String) @@ -274,12 +266,22 @@ Optional: Required: -- `name` (String) Name of the namespace. This is the name of the Kubernetes namespace in the cluster. -- `resource_allocation` (Map of String) Resource allocation for the namespace. This is a map containing the resource type and the resource value. For example, `{cpu_cores: '2', memory_MiB: '2048'}` +- `name` (String) +- `resource_allocation` (Map of String) + + + +### Nested Schema for `pack` + +Required: + +- `name` (String) +- `tag` (String) +- `values` (String) Optional: -- `images_blacklist` (List of String) List of images to disallow for the namespace. For example, `['nginx:latest', 'redis:latest']` +- `registry_uid` (String) @@ -287,9 +289,9 @@ Optional: Required: -- `configuration_scan_schedule` (String) The schedule for configuration scan. -- `conformance_scan_schedule` (String) The schedule for conformance scan. -- `penetration_scan_schedule` (String) The schedule for penetration scan. +- `configuration_scan_schedule` (String) +- `conformance_scan_schedule` (String) +- `penetration_scan_schedule` (String) @@ -312,4 +314,6 @@ Read-Only: - `latitude` (Number) - `longitude` (Number) - `region_code` (String) -- `region_name` (String) \ No newline at end of file +- `region_name` (String) + + diff --git a/docs/resources/cluster_maas.md b/docs/resources/cluster_maas.md index 2b32c76e..9266c75c 100644 --- a/docs/resources/cluster_maas.md +++ b/docs/resources/cluster_maas.md @@ -1,13 +1,14 @@ --- +# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cluster_maas Resource - terraform-provider-spectrocloud" subcategory: "" description: |- - Resource for managing MAAS clusters in Spectro Cloud through Palette. + --- # spectrocloud_cluster_maas (Resource) - Resource for managing MAAS clusters in Spectro Cloud through Palette. + ## Example Usage @@ -88,7 +89,6 @@ resource "spectrocloud_cluster_maas" "cluster" { } ``` - ## Schema @@ -105,23 +105,23 @@ resource "spectrocloud_cluster_maas" "cluster" { - `cloud_account_id` (String) - `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) - `cluster_rbac_binding` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding)) -- `context` (String) - `host_config` (Block List) (see [below for nested schema](#nestedblock--host_config)) - `location_config` (Block List) (see [below for nested schema](#nestedblock--location_config)) - `namespaces` (Block List) (see [below for nested schema](#nestedblock--namespaces)) -- `os_patch_after` (String) The date and time after which to patch the cluster. Prefix the time value with the respective RFC. Ex: `RFC3339: 2006-01-02T15:04:05Z07:00` -- `os_patch_on_boot` (Boolean) Whether to apply OS patch on boot. Default is `false`. -- `os_patch_schedule` (String) Cron schedule for OS patching. This must be in the form of `0 0 * * *`. 
+- `os_patch_after` (String) +- `os_patch_on_boot` (Boolean) +- `os_patch_schedule` (String) +- `pack` (Block List) (see [below for nested schema](#nestedblock--pack)) - `scan_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy)) -- `skip_completion` (Boolean) If `true`, the cluster will be created asynchronously. Default value is `false`. -- `tags` (Set of String) A list of tags to be applied to the cluster. Tags must be in the form of `key:value`. +- `skip_completion` (Boolean) +- `tags` (Set of String) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only -- `cloud_config_id` (String, Deprecated) ID of the cloud config used for the cluster. This cloud config must be of type `azure`. +- `cloud_config_id` (String) - `id` (String) The ID of this resource. -- `kubeconfig` (String) Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`. +- `kubeconfig` (String) ### Nested Schema for `cloud_config` @@ -145,13 +145,13 @@ Required: Optional: - `additional_labels` (Map of String) -- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`. -- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`. +- `control_plane` (Boolean) +- `control_plane_as_worker` (Boolean) - `max` (Number) Maximum number of nodes in the machine pool. This is used for autoscaling the machine pool. - `min` (Number) Minimum number of nodes in the machine pool. This is used for autoscaling the machine pool. - `node_repave_interval` (Number) Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools. - `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints)) -- `update_strategy` (String) Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`. +- `update_strategy` (String) ### Nested Schema for `machine_pool.instance_type` @@ -179,9 +179,9 @@ Read-Only: Required: -- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`. -- `key` (String) The key of the taint. -- `value` (String) The value of the taint. +- `effect` (String) +- `key` (String) +- `value` (String) @@ -190,58 +190,51 @@ Required: Required: -- `backup_location_id` (String) The ID of the backup location to use for the backup. -- `expiry_in_hour` (Number) The number of hours after which the backup will be deleted. For example, if the expiry is set to 24, the backup will be deleted after 24 hours. -- `prefix` (String) Prefix for the backup name. The backup name will be of the format --. -- `schedule` (String) The schedule for the backup. The schedule is specified in cron format. For example, to run the backup every day at 1:00 AM, the schedule should be set to `0 1 * * *`. +- `backup_location_id` (String) +- `expiry_in_hour` (Number) +- `prefix` (String) +- `schedule` (String) Optional: -- `cluster_uids` (Set of String) The list of cluster UIDs to include in the backup. If `include_all_clusters` is set to `true`, then all clusters will be included. -- `include_all_clusters` (Boolean) Whether to include all clusters in the backup. If set to false, only the clusters specified in `cluster_uids` will be included. -- `include_cluster_resources` (Boolean) Whether to include the cluster resources in the backup. 
If set to false, only the cluster configuration and disks will be backed up. -- `include_disks` (Boolean) Whether to include the disks in the backup. If set to false, only the cluster configuration will be backed up. -- `namespaces` (Set of String) The list of Kubernetes namespaces to include in the backup. If not specified, all namespaces will be included. +- `include_cluster_resources` (Boolean) +- `include_disks` (Boolean) +- `namespaces` (Set of String) ### Nested Schema for `cluster_profile` -Required: - -- `id` (String) The ID of the cluster profile. - Optional: - `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) +Read-Only: + +- `id` (String) The ID of this resource. + ### Nested Schema for `cluster_profile.pack` Required: -- `name` (String) The name of the pack. The name must be unique within the cluster profile. +- `name` (String) +- `values` (String) Optional: - `manifest` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack--manifest)) -- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry. -- `tag` (String) The tag of the pack. The tag is the version of the pack. -- `type` (String) The type of the pack. The default value is `spectro`. -- `uid` (String) -- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format. +- `registry_uid` (String) +- `tag` (String) +- `type` (String) ### Nested Schema for `cluster_profile.pack.manifest` Required: -- `content` (String) The content of the manifest. The content is the YAML content of the manifest. -- `name` (String) The name of the manifest. The name must be unique within the pack. - -Read-Only: - -- `uid` (String) +- `content` (String) +- `name` (String) @@ -251,12 +244,12 @@ Read-Only: Required: -- `type` (String) The type of the RBAC binding. Can be one of the following values: `RoleBinding`, or `ClusterRoleBinding`. +- `type` (String) Optional: -- `namespace` (String) The Kubernetes namespace of the RBAC binding. Required if 'type' is set to 'RoleBinding'. -- `role` (Map of String) The role of the RBAC binding. Required if 'type' is set to 'RoleBinding'. +- `namespace` (String) +- `role` (Map of String) - `subjects` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding--subjects)) @@ -264,12 +257,12 @@ Optional: Required: -- `name` (String) The name of the subject. Required if 'type' is set to 'User' or 'Group'. -- `type` (String) The type of the subject. Can be one of the following values: `User`, `Group`, or `ServiceAccount`. +- `name` (String) +- `type` (String) Optional: -- `namespace` (String) The Kubernetes namespace of the subject. Required if 'type' is set to 'ServiceAccount'. +- `namespace` (String) @@ -305,12 +298,22 @@ Optional: Required: -- `name` (String) Name of the namespace. This is the name of the Kubernetes namespace in the cluster. -- `resource_allocation` (Map of String) Resource allocation for the namespace. This is a map containing the resource type and the resource value. For example, `{cpu_cores: '2', memory_MiB: '2048'}` +- `name` (String) +- `resource_allocation` (Map of String) + + + +### Nested Schema for `pack` + +Required: + +- `name` (String) +- `tag` (String) +- `values` (String) Optional: -- `images_blacklist` (List of String) List of images to disallow for the namespace. 
For example, `['nginx:latest', 'redis:latest']` +- `registry_uid` (String) @@ -318,9 +321,9 @@ Optional: Required: -- `configuration_scan_schedule` (String) The schedule for configuration scan. -- `conformance_scan_schedule` (String) The schedule for conformance scan. -- `penetration_scan_schedule` (String) The schedule for penetration scan. +- `configuration_scan_schedule` (String) +- `conformance_scan_schedule` (String) +- `penetration_scan_schedule` (String) @@ -330,4 +333,6 @@ Optional: - `create` (String) - `delete` (String) -- `update` (String) \ No newline at end of file +- `update` (String) + + diff --git a/docs/resources/cluster_openstack.md b/docs/resources/cluster_openstack.md index 9bffa9e2..194fbf25 100644 --- a/docs/resources/cluster_openstack.md +++ b/docs/resources/cluster_openstack.md @@ -1,13 +1,14 @@ --- +# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cluster_openstack Resource - terraform-provider-spectrocloud" subcategory: "" description: |- - Resource for managing Openstack clusters in Spectro Cloud through Palette. + --- # spectrocloud_cluster_openstack (Resource) - Resource for managing Openstack clusters in Spectro Cloud through Palette. + ## Example Usage @@ -79,7 +80,6 @@ resource "spectrocloud_cluster_openstack" "cluster" { } ``` - ## Schema @@ -96,23 +96,23 @@ resource "spectrocloud_cluster_openstack" "cluster" { - `backup_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--backup_policy)) - `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) - `cluster_rbac_binding` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding)) -- `context` (String) - `host_config` (Block List) (see [below for nested schema](#nestedblock--host_config)) - `location_config` (Block List) (see [below for nested schema](#nestedblock--location_config)) - `namespaces` (Block List) (see [below for nested schema](#nestedblock--namespaces)) -- `os_patch_after` (String) The date and time after which to patch the cluster. Prefix the time value with the respective RFC. Ex: `RFC3339: 2006-01-02T15:04:05Z07:00` -- `os_patch_on_boot` (Boolean) Whether to apply OS patch on boot. Default is `false`. -- `os_patch_schedule` (String) Cron schedule for OS patching. This must be in the form of `0 0 * * *`. +- `os_patch_after` (String) +- `os_patch_on_boot` (Boolean) +- `os_patch_schedule` (String) +- `pack` (Block List) (see [below for nested schema](#nestedblock--pack)) - `scan_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy)) -- `skip_completion` (Boolean) If `true`, the cluster will be created asynchronously. Default value is `false`. -- `tags` (Set of String) A list of tags to be applied to the cluster. Tags must be in the form of `key:value`. +- `skip_completion` (Boolean) +- `tags` (Set of String) - `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) ### Read-Only -- `cloud_config_id` (String, Deprecated) ID of the cloud config used for the cluster. This cloud config must be of type `azure`. +- `cloud_config_id` (String) - `id` (String) The ID of this resource. -- `kubeconfig` (String) Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`. +- `kubeconfig` (String) ### Nested Schema for `cloud_config` @@ -137,7 +137,7 @@ Optional: Required: -- `count` (Number) Number of nodes in the machine pool. 
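A worker-pool fragment for the OpenStack attributes listed here; the flavor and zone names are illustrative assumptions.

```terraform
machine_pool {
  name                 = "worker-basic"
  count                = 2
  instance_type        = "m1.xlarge" # OpenStack flavor, illustrative
  azs                  = ["nova"]    # illustrative zone
  node_repave_interval = 30          # applicable only to worker pools
}
```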
+- `count` (Number) - `instance_type` (String) - `name` (String) @@ -145,21 +145,21 @@ Optional: - `additional_labels` (Map of String) - `azs` (Set of String) -- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`. -- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`. +- `control_plane` (Boolean) +- `control_plane_as_worker` (Boolean) - `node_repave_interval` (Number) Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools. - `subnet_id` (String) - `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints)) -- `update_strategy` (String) Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`. +- `update_strategy` (String) ### Nested Schema for `machine_pool.taints` Required: -- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`. -- `key` (String) The key of the taint. -- `value` (String) The value of the taint. +- `effect` (String) +- `key` (String) +- `value` (String) @@ -168,58 +168,51 @@ Required: Required: -- `backup_location_id` (String) The ID of the backup location to use for the backup. -- `expiry_in_hour` (Number) The number of hours after which the backup will be deleted. For example, if the expiry is set to 24, the backup will be deleted after 24 hours. -- `prefix` (String) Prefix for the backup name. The backup name will be of the format --. -- `schedule` (String) The schedule for the backup. The schedule is specified in cron format. For example, to run the backup every day at 1:00 AM, the schedule should be set to `0 1 * * *`. +- `backup_location_id` (String) +- `expiry_in_hour` (Number) +- `prefix` (String) +- `schedule` (String) Optional: -- `cluster_uids` (Set of String) The list of cluster UIDs to include in the backup. If `include_all_clusters` is set to `true`, then all clusters will be included. -- `include_all_clusters` (Boolean) Whether to include all clusters in the backup. If set to false, only the clusters specified in `cluster_uids` will be included. -- `include_cluster_resources` (Boolean) Whether to include the cluster resources in the backup. If set to false, only the cluster configuration and disks will be backed up. -- `include_disks` (Boolean) Whether to include the disks in the backup. If set to false, only the cluster configuration will be backed up. -- `namespaces` (Set of String) The list of Kubernetes namespaces to include in the backup. If not specified, all namespaces will be included. +- `include_cluster_resources` (Boolean) +- `include_disks` (Boolean) +- `namespaces` (Set of String) ### Nested Schema for `cluster_profile` -Required: - -- `id` (String) The ID of the cluster profile. - Optional: - `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) +Read-Only: + +- `id` (String) The ID of this resource. + ### Nested Schema for `cluster_profile.pack` Required: -- `name` (String) The name of the pack. The name must be unique within the cluster profile. +- `name` (String) +- `values` (String) Optional: - `manifest` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack--manifest)) -- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry. -- `tag` (String) The tag of the pack. 
The tag is the version of the pack. -- `type` (String) The type of the pack. The default value is `spectro`. -- `uid` (String) -- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format. +- `registry_uid` (String) +- `tag` (String) +- `type` (String) ### Nested Schema for `cluster_profile.pack.manifest` Required: -- `content` (String) The content of the manifest. The content is the YAML content of the manifest. -- `name` (String) The name of the manifest. The name must be unique within the pack. - -Read-Only: - -- `uid` (String) +- `content` (String) +- `name` (String) @@ -229,12 +222,12 @@ Read-Only: Required: -- `type` (String) The type of the RBAC binding. Can be one of the following values: `RoleBinding`, or `ClusterRoleBinding`. +- `type` (String) Optional: -- `namespace` (String) The Kubernetes namespace of the RBAC binding. Required if 'type' is set to 'RoleBinding'. -- `role` (Map of String) The role of the RBAC binding. Required if 'type' is set to 'RoleBinding'. +- `namespace` (String) +- `role` (Map of String) - `subjects` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding--subjects)) @@ -242,12 +235,12 @@ Optional: Required: -- `name` (String) The name of the subject. Required if 'type' is set to 'User' or 'Group'. -- `type` (String) The type of the subject. Can be one of the following values: `User`, `Group`, or `ServiceAccount`. +- `name` (String) +- `type` (String) Optional: -- `namespace` (String) The Kubernetes namespace of the subject. Required if 'type' is set to 'ServiceAccount'. +- `namespace` (String) @@ -283,12 +276,22 @@ Optional: Required: -- `name` (String) Name of the namespace. This is the name of the Kubernetes namespace in the cluster. -- `resource_allocation` (Map of String) Resource allocation for the namespace. This is a map containing the resource type and the resource value. For example, `{cpu_cores: '2', memory_MiB: '2048'}` +- `name` (String) +- `resource_allocation` (Map of String) + + + +### Nested Schema for `pack` + +Required: + +- `name` (String) +- `tag` (String) +- `values` (String) Optional: -- `images_blacklist` (List of String) List of images to disallow for the namespace. For example, `['nginx:latest', 'redis:latest']` +- `registry_uid` (String) @@ -296,9 +299,9 @@ Optional: Required: -- `configuration_scan_schedule` (String) The schedule for configuration scan. -- `conformance_scan_schedule` (String) The schedule for conformance scan. -- `penetration_scan_schedule` (String) The schedule for penetration scan. +- `configuration_scan_schedule` (String) +- `conformance_scan_schedule` (String) +- `penetration_scan_schedule` (String) @@ -308,4 +311,6 @@ Optional: - `create` (String) - `delete` (String) -- `update` (String) \ No newline at end of file +- `update` (String) + + diff --git a/docs/resources/cluster_vsphere.md b/docs/resources/cluster_vsphere.md index 11de9c3c..45aa6f9b 100644 --- a/docs/resources/cluster_vsphere.md +++ b/docs/resources/cluster_vsphere.md @@ -1,69 +1,133 @@ --- +# generated by https://github.com/hashicorp/terraform-plugin-docs page_title: "spectrocloud_cluster_vsphere Resource - terraform-provider-spectrocloud" subcategory: "" description: |- - A resource to manage a vSphere cluster in Pallette. + --- # spectrocloud_cluster_vsphere (Resource) - A resource to manage a vSphere cluster in Pallette. 
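Both the OpenStack and vSphere machine pools documented in this change accept `node_repave_interval` on worker pools. Below is a minimal illustrative fragment for `spectrocloud_cluster_openstack`; the flavor name, zone, and taint values are assumptions for the sake of example, not provider defaults:

```terraform
# Hypothetical worker pool; `instance_type` is an OpenStack flavor name (placeholder value).
machine_pool {
  name                 = "worker-basic"
  count                = 2
  instance_type        = "spectro-xlarge"
  azs                  = ["zone1"]
  update_strategy      = "RollingUpdateScaleOut"
  node_repave_interval = 30 # seconds a node must be Ready before the next node is repaved

  taints {
    key    = "dedicated"
    value  = "worker"
    effect = "NoSchedule"
  }
}
```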
- -## Example Usage +## Example Usage +```terraform +data "spectrocloud_cluster_profile" "vmware_profile" { + name = "vsphere-picard-2" + version = "1.0.0" + context = "tenant" +} +data "spectrocloud_cloudaccount_vsphere" "vmware_account" { + name = var.shared_vmware_cloud_account_name +} + + +resource "spectrocloud_cluster_vsphere" "cluster" { + name = "vsphere-picard-3" + cloud_account_id = data.spectrocloud_cloudaccount_vsphere.vmware_account.id + cluster_profile { + id = data.spectrocloud_cluster_profile.vmware_profile.id + } + cloud_config { + ssh_key = var.cluster_ssh_public_key + + datacenter = var.vsphere_datacenter + folder = var.vsphere_folder + // For Dynamic DNS (network_type & network_search_domain value should set for DDNS) + network_type = "DDNS" + network_search_domain = var.cluster_network_search + // For Static (By Default static_ip is false, for static provisioning, it is set to be true. Not required to specify network_type & network_search_domain) + # static_ip = true + } + + machine_pool { + control_plane = true + control_plane_as_worker = true + name = "master-pool" + count = 1 + placement { + cluster = var.vsphere_cluster + resource_pool = var.vsphere_resource_pool + datastore = var.vsphere_datastore + network = var.vsphere_network + } + instance_type { + disk_size_gb = 40 + memory_mb = 4096 + cpu = 2 + } + } + + machine_pool { + name = "worker-basic" + count = 1 + node_repave_interval = 30 + placement { + cluster = var.vsphere_cluster + resource_pool = var.vsphere_resource_pool + datastore = var.vsphere_datastore + network = var.vsphere_network + } + instance_type { + disk_size_gb = 40 + memory_mb = 8192 + cpu = 4 + } + } +} +``` ## Schema ### Required -- `cloud_account_id` (String) ID of the cloud account to be used for the cluster. This cloud account must be of type `vsphere`. +- `cloud_account_id` (String) - `cloud_config` (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--cloud_config)) - `machine_pool` (Block Set, Min: 1) (see [below for nested schema](#nestedblock--machine_pool)) -- `name` (String) The name of the cluster. +- `name` (String) ### Optional -- `apply_setting` (String) The setting to apply the cluster profile. `DownloadAndInstall` will download and install packs in one action. `DownloadAndInstallLater` will only download artifact and postpone install for later. Default value is `DownloadAndInstall`. +- `apply_setting` (String) - `backup_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--backup_policy)) - `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) +- `cluster_profile_id` (String, Deprecated) - `cluster_rbac_binding` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding)) -- `context` (String) - `host_config` (Block List) (see [below for nested schema](#nestedblock--host_config)) - `location_config` (Block List) (see [below for nested schema](#nestedblock--location_config)) - `namespaces` (Block List) (see [below for nested schema](#nestedblock--namespaces)) -- `os_patch_after` (String) The date and time after which to patch the cluster. Prefix the time value with the respective RFC. Ex: `RFC3339: 2006-01-02T15:04:05Z07:00` -- `os_patch_on_boot` (Boolean) Whether to apply OS patch on boot. Default is `false`. -- `os_patch_schedule` (String) The cron schedule for OS patching. This must be in the form of cron syntax. Ex: `0 0 * * *`. 
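As a quick illustration of the OS-patch attributes above, a hedged sketch; the timestamp format follows the `RFC3339` example from the original description, and all values are placeholders:

```terraform
# Illustrative only: patch on boot plus a nightly patch window.
os_patch_on_boot  = true
os_patch_schedule = "0 0 * * *"                           # cron syntax
os_patch_after    = "RFC3339: 2006-01-02T15:04:05Z07:00"  # patch after this time
```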
+- `os_patch_after` (String)
+- `os_patch_on_boot` (Boolean)
+- `os_patch_schedule` (String)
+- `pack` (Block List) (see [below for nested schema](#nestedblock--pack))
- `scan_policy` (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy))
-- `skip_completion` (Boolean) If `true`, the cluster will be created asynchronously. Default value is `false`.
-- `tags` (Set of String) A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.
+- `skip_completion` (Boolean)
+- `tags` (Set of String)
- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))

### Read-Only

-- `cloud_config_id` (String, Deprecated) ID of the cloud config used for the cluster. This cloud config must be of type `azure`.
+- `cloud_config_id` (String)
- `id` (String) The ID of this resource.
-- `kubeconfig` (String) Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.
+- `kubeconfig` (String)


### Nested Schema for `cloud_config`

Required:

-- `datacenter` (String) The name of the datacenter in vSphere. This is the name of the datacenter as it appears in vSphere.
-- `folder` (String) The name of the folder in vSphere. This is the name of the folder as it appears in vSphere.
-- `ssh_key` (String) The SSH key to be used for the cluster. This is the public key that will be used to access the cluster.
+- `datacenter` (String)
+- `folder` (String)
+- `ssh_key` (String)

Optional:

-- `image_template_folder` (String) The name of the image template folder in vSphere. This is the name of the folder as it appears in vSphere.
-- `network_search_domain` (String) The search domain to use for the cluster in case of DHCP.
-- `network_type` (String) The type of network to use for the cluster. This can be `VIP` or `DDNS`.
-- `ntp_servers` (Set of String) A list of NTP servers to be used by the cluster.
-- `static_ip` (Boolean) Whether to use static IP addresses for the cluster. If `true`, the cluster will use static IP addresses. If `false`, the cluster will use DDNS. Default is `false`.
+- `image_template_folder` (String)
+- `network_search_domain` (String)
+- `network_type` (String)
+- `static_ip` (Boolean)



@@ -71,28 +135,28 @@ Optional:

Required:

-- `count` (Number) Number of nodes in the machine pool.
+- `count` (Number)
- `instance_type` (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--machine_pool--instance_type))
-- `name` (String) The name of the machine pool. This is used to identify the machine pool in the cluster.
+- `name` (String)
- `placement` (Block List, Min: 1) (see [below for nested schema](#nestedblock--machine_pool--placement))

Optional:

- `additional_labels` (Map of String)
-- `control_plane` (Boolean) Whether this machine pool is a control plane. Defaults to `false`.
-- `control_plane_as_worker` (Boolean) Whether this machine pool is a control plane and a worker. Defaults to `false`.
+- `control_plane` (Boolean)
+- `control_plane_as_worker` (Boolean)
- `node_repave_interval` (Number) Minimum number of seconds a node should be Ready before the next node is selected for repave. Default value is `0`. Applicable only to worker pools.
- `taints` (Block List) (see [below for nested schema](#nestedblock--machine_pool--taints))
-- `update_strategy` (String) Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.
+- `update_strategy` (String)


### Nested Schema for `machine_pool.instance_type`

Required:

-- `cpu` (Number) The number of CPUs.
-- `disk_size_gb` (Number) The size of the disk in GB. -- `memory_mb` (Number) The amount of memory in MB. +- `cpu` (Number) +- `disk_size_gb` (Number) +- `memory_mb` (Number) @@ -100,14 +164,14 @@ Required: Required: -- `cluster` (String) The name of the cluster to use for the machine pool. As it appears in the vSphere. -- `datastore` (String) The name of the datastore to use for the machine pool. As it appears in the vSphere. -- `network` (String) The name of the network to use for the machine pool. As it appears in the vSphere. -- `resource_pool` (String) The name of the resource pool to use for the machine pool. As it appears in the vSphere. +- `cluster` (String) +- `datastore` (String) +- `network` (String) +- `resource_pool` (String) Optional: -- `static_ip_pool_id` (String) The ID of the static IP pool to use for the machine pool in case of static cluster placement. +- `static_ip_pool_id` (String) Read-Only: @@ -119,9 +183,9 @@ Read-Only: Required: -- `effect` (String) The effect of the taint. Allowed values are: `NoSchedule`, `PreferNoSchedule` or `NoExecute`. -- `key` (String) The key of the taint. -- `value` (String) The value of the taint. +- `effect` (String) +- `key` (String) +- `value` (String) @@ -130,58 +194,51 @@ Required: Required: -- `backup_location_id` (String) The ID of the backup location to use for the backup. -- `expiry_in_hour` (Number) The number of hours after which the backup will be deleted. For example, if the expiry is set to 24, the backup will be deleted after 24 hours. -- `prefix` (String) Prefix for the backup name. The backup name will be of the format --. -- `schedule` (String) The schedule for the backup. The schedule is specified in cron format. For example, to run the backup every day at 1:00 AM, the schedule should be set to `0 1 * * *`. +- `backup_location_id` (String) +- `expiry_in_hour` (Number) +- `prefix` (String) +- `schedule` (String) Optional: -- `cluster_uids` (Set of String) The list of cluster UIDs to include in the backup. If `include_all_clusters` is set to `true`, then all clusters will be included. -- `include_all_clusters` (Boolean) Whether to include all clusters in the backup. If set to false, only the clusters specified in `cluster_uids` will be included. -- `include_cluster_resources` (Boolean) Whether to include the cluster resources in the backup. If set to false, only the cluster configuration and disks will be backed up. -- `include_disks` (Boolean) Whether to include the disks in the backup. If set to false, only the cluster configuration will be backed up. -- `namespaces` (Set of String) The list of Kubernetes namespaces to include in the backup. If not specified, all namespaces will be included. +- `include_cluster_resources` (Boolean) +- `include_disks` (Boolean) +- `namespaces` (Set of String) ### Nested Schema for `cluster_profile` -Required: - -- `id` (String) The ID of the cluster profile. - Optional: - `pack` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) +Read-Only: + +- `id` (String) The ID of this resource. + ### Nested Schema for `cluster_profile.pack` Required: -- `name` (String) The name of the pack. The name must be unique within the cluster profile. +- `name` (String) +- `values` (String) Optional: - `manifest` (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack--manifest)) -- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry. -- `tag` (String) The tag of the pack. 
The tag is the version of the pack. -- `type` (String) The type of the pack. The default value is `spectro`. -- `uid` (String) -- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format. +- `registry_uid` (String) +- `tag` (String) +- `type` (String) ### Nested Schema for `cluster_profile.pack.manifest` Required: -- `content` (String) The content of the manifest. The content is the YAML content of the manifest. -- `name` (String) The name of the manifest. The name must be unique within the pack. - -Read-Only: - -- `uid` (String) +- `content` (String) +- `name` (String) @@ -191,12 +248,12 @@ Read-Only: Required: -- `type` (String) The type of the RBAC binding. Can be one of the following values: `RoleBinding`, or `ClusterRoleBinding`. +- `type` (String) Optional: -- `namespace` (String) The Kubernetes namespace of the RBAC binding. Required if 'type' is set to 'RoleBinding'. -- `role` (Map of String) The role of the RBAC binding. Required if 'type' is set to 'RoleBinding'. +- `namespace` (String) +- `role` (Map of String) - `subjects` (Block List) (see [below for nested schema](#nestedblock--cluster_rbac_binding--subjects)) @@ -204,12 +261,12 @@ Optional: Required: -- `name` (String) The name of the subject. Required if 'type' is set to 'User' or 'Group'. -- `type` (String) The type of the subject. Can be one of the following values: `User`, `Group`, or `ServiceAccount`. +- `name` (String) +- `type` (String) Optional: -- `namespace` (String) The Kubernetes namespace of the subject. Required if 'type' is set to 'ServiceAccount'. +- `namespace` (String) @@ -245,12 +302,22 @@ Optional: Required: -- `name` (String) Name of the namespace. This is the name of the Kubernetes namespace in the cluster. -- `resource_allocation` (Map of String) Resource allocation for the namespace. This is a map containing the resource type and the resource value. For example, `{cpu_cores: '2', memory_MiB: '2048'}` +- `name` (String) +- `resource_allocation` (Map of String) + + + +### Nested Schema for `pack` + +Required: + +- `name` (String) +- `tag` (String) +- `values` (String) Optional: -- `images_blacklist` (List of String) List of images to disallow for the namespace. For example, `['nginx:latest', 'redis:latest']` +- `registry_uid` (String) @@ -258,9 +325,9 @@ Optional: Required: -- `configuration_scan_schedule` (String) The schedule for configuration scan. -- `conformance_scan_schedule` (String) The schedule for conformance scan. -- `penetration_scan_schedule` (String) The schedule for penetration scan. 
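The backup and scan schedules above are plain cron expressions. A hedged example fragment follows, with a placeholder backup-location UID and schedule values taken from the removed descriptions:

```terraform
# Illustrative: daily backup at 01:00, plus periodic security scans.
backup_policy {
  backup_location_id = "<backup-location-uid>" # placeholder
  prefix             = "prod-backup"
  expiry_in_hour     = 24
  schedule           = "0 1 * * *"
}

scan_policy {
  configuration_scan_schedule = "0 11 * * *"
  penetration_scan_schedule   = "0 11 * * *"
  conformance_scan_schedule   = "0 0 1 * *"
}
```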
+- `configuration_scan_schedule` (String) +- `conformance_scan_schedule` (String) +- `penetration_scan_schedule` (String) @@ -270,4 +337,6 @@ Optional: - `create` (String) - `delete` (String) -- `update` (String) \ No newline at end of file +- `update` (String) + + diff --git a/go.mod b/go.mod index 23ba3dab..0ee443c2 100644 --- a/go.mod +++ b/go.mod @@ -5,12 +5,13 @@ go 1.18 require ( github.com/go-openapi/runtime v0.19.28 github.com/go-openapi/strfmt v0.20.1 + github.com/google/go-cmp v0.5.9 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-docs v0.13.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 github.com/pkg/errors v0.9.1 github.com/robfig/cron v1.2.0 - github.com/spectrocloud/hapi v1.14.1-0.20230814141242-394093e7fedb + github.com/spectrocloud/hapi v1.14.1-0.20230915073348-759e2eb74641 github.com/stretchr/testify v1.7.2 ) @@ -18,13 +19,15 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.1.1 // indirect github.com/Masterminds/sprig/v3 v3.2.2 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/agext/levenshtein v1.2.2 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/bgentry/speakeasy v0.1.0 // indirect + github.com/cloudflare/circl v1.3.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.13.0 // indirect github.com/go-errors/errors v1.4.0 // indirect @@ -37,35 +40,34 @@ require ( github.com/go-openapi/swag v0.19.14 // indirect github.com/go-openapi/validate v0.20.2 // indirect github.com/go-stack/stack v1.8.0 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/uuid v1.3.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.2.1 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.4.6 // indirect + github.com/hashicorp/go-plugin v1.5.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.4.0 // indirect - github.com/hashicorp/hcl/v2 v2.15.0 // indirect + github.com/hashicorp/hc-install v0.6.0 // indirect + github.com/hashicorp/hcl/v2 v2.18.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.17.3 // indirect - github.com/hashicorp/terraform-json v0.14.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.14.1 // indirect - github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect - github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect - github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect + github.com/hashicorp/terraform-exec v0.19.0 // indirect + github.com/hashicorp/terraform-json v0.17.1 // indirect + 
github.com/hashicorp/terraform-plugin-go v0.19.0 // indirect + github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect + github.com/hashicorp/terraform-registry-address v0.2.2 // indirect + github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/imdario/mergo v0.3.15 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect - github.com/mitchellh/cli v1.1.4 // indirect + github.com/mitchellh/cli v1.1.5 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.0 // indirect @@ -82,18 +84,19 @@ require ( github.com/spectrocloud/gomi v1.14.1-0.20230412095143-b0595c6c6f08 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect - github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect - github.com/vmihailenco/tagparser v0.1.1 // indirect - github.com/zclconf/go-cty v1.12.1 // indirect + github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/zclconf/go-cty v1.14.0 // indirect go.mongodb.org/mongo-driver v1.5.1 // indirect - golang.org/x/crypto v0.5.0 // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect - google.golang.org/appengine v1.6.6 // indirect - google.golang.org/genproto v0.0.0-20200711021454-869866162049 // indirect - google.golang.org/grpc v1.50.1 // indirect - google.golang.org/protobuf v1.28.1 // indirect + golang.org/x/crypto v0.13.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.13.0 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/grpc v1.57.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..c3e130ca --- /dev/null +++ b/go.sum @@ -0,0 +1,535 @@ +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= +github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 
h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs= +github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= +github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= +github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-errors/errors v1.4.0 h1:2OA7MFw38+e9na72T1xgkomPb6GzZzzxvJ5U630FoRM= +github.com/go-errors/errors v1.4.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= +github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= +github.com/go-openapi/analysis v0.20.0 h1:UN09o0kNhleunxW7LR+KnltD0YrJ8FF03pSqvAN3Vro= +github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9 h1:9SnKdGhiPZHF3ttwFMiCBEb8jQ4IDdrK+5+a0oTygA4= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod 
h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= +github.com/go-openapi/loads v0.20.2 h1:z5p5Xf5wujMxS1y8aP+vxwW5qYT2zdJBbXKmQUG3lcc= +github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= +github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/runtime v0.19.28 h1:9lYu6axek8LJrVkMVViVirRcpoaCxXX7+sSvmizGVnA= +github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= +github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= +github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt 
v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.1 h1:1VgxvehFne1mbChGeCmZ5pc0LxUf6yaACVSIYAR91Xc= +github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= +github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= +github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= +github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= +github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= +github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts= +github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod 
h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= +github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.5.1 h1:oGm7cWBaYIp3lJpx1RUEfLWophprE2EV/KUeqBYo+6k= +github.com/hashicorp/go-plugin v1.5.1/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.6.0 h1:fDHnU7JNFNSQebVKYhHZ0va1bC6SrPQ8fpebsvNr2w4= +github.com/hashicorp/hc-install v0.6.0/go.mod h1:10I912u3nntx9Umo1VAeYPUUuehk0aRQJYpMwbX5wQA= +github.com/hashicorp/hcl/v2 v2.18.0 h1:wYnG7Lt31t2zYkcquwgKo6MWXzRUDIeIVU5naZwHLl8= +github.com/hashicorp/hcl/v2 v2.18.0/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= +github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= +github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA= +github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o= +github.com/hashicorp/terraform-plugin-docs v0.13.0 h1:6e+VIWsVGb6jYJewfzq2ok2smPzZrt1Wlm9koLeKazY= +github.com/hashicorp/terraform-plugin-docs v0.13.0/go.mod h1:W0oCmHAjIlTHBbvtppWHe8fLfZ2BznQbuv8+UD8OucQ= +github.com/hashicorp/terraform-plugin-go v0.19.0 h1:BuZx/6Cp+lkmiG0cOBk6Zps0Cb2tmqQpDM3iAtnhDQU= +github.com/hashicorp/terraform-plugin-go v0.19.0/go.mod h1:EhRSkEPNoylLQntYsk5KrDHTZJh9HQoumZXbOGOXmec= +github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= +github.com/hashicorp/terraform-plugin-log 
v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0 h1:wcOKYwPI9IorAJEBLzgclh3xVolO7ZorYd6U1vnok14= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0/go.mod h1:qH/34G25Ugdj5FcM95cSoXzUgIbgfhVLXCcEcYaMwq8= +github.com/hashicorp/terraform-registry-address v0.2.2 h1:lPQBg403El8PPicg/qONZJDC6YlgCVbWDtNmmZKtBno= +github.com/hashicorp/terraform-registry-address v0.2.2/go.mod h1:LtwNbCihUoUZ3RYriyS2wF/lGPB6gF9ICLRtuDk7hSo= +github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= +github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng= +github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 
h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= 
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= +github.com/spectrocloud/gomi v1.14.1-0.20230412095143-b0595c6c6f08 h1:AnOC0U+ExlKBeT5yF2Pg8PPfVOfxwOUBS/5deOl1Q4Y= +github.com/spectrocloud/gomi v1.14.1-0.20230412095143-b0595c6c6f08/go.mod h1:UnhUDpFEvtYh6m384r3xzj8/+Z6/hMp2O8whEMYVHec= +github.com/spectrocloud/hapi v1.14.1-0.20230915073348-759e2eb74641 h1:y25iXyYvWaxOxamKxh3YjPdsbMxMvVP8Z5YuPkV/wBE= +github.com/spectrocloud/hapi v1.14.1-0.20230915073348-759e2eb74641/go.mod h1:O/Bkbw92QPSGPNQPqKt7Qlkn+9BKK/a22KTUlk76KHI= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod 
h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.14.0 h1:/Xrd39K7DXbHzlisFP9c4pHao4yyf+/Ug9LEz+Y/yhc= +github.com/zclconf/go-cty v1.14.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI= +go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= +golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/spectrocloud/cluster_common.go b/spectrocloud/cluster_common.go index 5649fc35..f5322efe 100644 --- a/spectrocloud/cluster_common.go +++ b/spectrocloud/cluster_common.go @@ -2,9 +2,11 @@ package spectrocloud import ( "errors" + "fmt" + "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "strings" ) var ( @@ -58,3 +60,20 @@ func toSSHKeys(cloudConfig map[string]interface{}) ([]string, error) { } return sshKeys, nil } + +func FlattenControlPlaneAndRepaveInterval(isControlPlane *bool, oi map[string]interface{}, nodeRepaveInterval int32) { + if isControlPlane != nil { + oi["control_plane"] = *isControlPlane + if !*isControlPlane { + oi["node_repave_interval"] = int32(nodeRepaveInterval) + } + } +} + +func ValidationNodeRepaveIntervalForControlPlane(nodeRepaveInterval int) error { + if nodeRepaveInterval != 0 { + errMsg := fmt.Sprintf("Validation error: The `node_repave_interval` attribute is not applicable for the control plane. Attempted value: %d.", nodeRepaveInterval) + return errors.New(errMsg) + } + return nil +} diff --git a/spectrocloud/cluster_common_hash_test.go b/spectrocloud/cluster_common_hash_test.go index fe50da64..9c33241f 100644 --- a/spectrocloud/cluster_common_hash_test.go +++ b/spectrocloud/cluster_common_hash_test.go @@ -2,9 +2,10 @@ package spectrocloud import ( "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "testing" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" ) diff --git a/spectrocloud/cluster_node_common.go b/spectrocloud/cluster_node_common.go index e0ce2a3b..356a6a95 100644 --- a/spectrocloud/cluster_node_common.go +++ b/spectrocloud/cluster_node_common.go @@ -1,13 +1,7 @@ package spectrocloud import ( - "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/palette-sdk-go/client" - "log" - "time" ) var NodeMaintenanceLifecycleStates = []string{ @@ -20,100 +14,3 @@ var NodeMaintenanceLifecycleStates = []string{ type GetMaintenanceStatus func(string, string, string, string) (*models.V1MachineMaintenanceStatus, error) type GetNodeStatusMap func(string, string, string) (map[string]models.V1CloudMachineStatus, error) - -func waitForNodeMaintenanceCompleted(c *client.V1Client, ctx context.Context, fn GetMaintenanceStatus, ClusterContext string, ConfigUID string, MachineName string, NodeId string) (error, bool) { - - stateConf := &retry.StateChangeConf{ - Delay: 30 * time.Second, - Pending: NodeMaintenanceLifecycleStates, - Target: []string{"Completed"}, - Refresh: resourceClusterNodeMaintenanceRefreshFunc(c, fn, ClusterContext, ConfigUID, MachineName, NodeId), - Timeout: 30 * time.Minute, - MinTimeout: 10 * time.Second, - } - - // Wait, catching any errors - _, err := stateConf.WaitForStateContext(ctx) - if err != nil { - return err, true - } - return nil, false -} - -func resourceClusterNodeMaintenanceRefreshFunc(c *client.V1Client, fn GetMaintenanceStatus, 
ClusterContext string, ConfigUID string, MachineName string, NodeId string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - nmStatus, err := c.GetNodeMaintenanceStatus(client.GetMaintenanceStatus(fn), ClusterContext, ConfigUID, MachineName, NodeId) - if err != nil { - return nil, "", err - } - - state := nmStatus.State - log.Printf("Node maintenance state (%s): %s", NodeId, state) - - return nmStatus, state, nil - } -} - -func resourceNodeAction(c *client.V1Client, ctx context.Context, newMachinePool interface{}, fn GetMaintenanceStatus, CloudType string, ClusterContext string, ConfigUID string, MachineName string) error { - newNodes := newMachinePool.(map[string]interface{})["node"] - if newNodes != nil { - for _, n := range newNodes.([]interface{}) { - node := n.(map[string]interface{}) - nodeMaintenanceStatus, err := c.GetNodeMaintenanceStatus(client.GetMaintenanceStatus(fn), ClusterContext, ConfigUID, MachineName, node["node_id"].(string)) - if err != nil { - return err - } - if node["action"] != nodeMaintenanceStatus.Action { - nm := &models.V1MachineMaintenance{ - Action: node["action"].(string), - } - err := c.ToggleMaintenanceOnNode(nm, CloudType, ClusterContext, ConfigUID, MachineName, node["node_id"].(string)) - if err != nil { - return err - } - err, isError := waitForNodeMaintenanceCompleted(c, ctx, fn, ClusterContext, ConfigUID, MachineName, node["node_id"].(string)) - if isError { - return err - } - } - } - } - return nil -} - -func flattenNodeMaintenanceStatus(c *client.V1Client, d *schema.ResourceData, fn GetNodeStatusMap, mPools []interface{}, cloudConfigId string, ClusterContext string) ([]interface{}, error) { - _, n := d.GetChange("machine_pool") - nsMap := make(map[string]interface{}) - for _, mp := range n.(*schema.Set).List() { - machinePool := mp.(map[string]interface{}) - nsMap[machinePool["name"].(string)] = machinePool - } - - for i, mp := range mPools { - m := mp.(map[string]interface{}) - // For handling unit test - if _, ok := nsMap[m["name"].(string)]; !ok { - return mPools, nil - } - - newNodeList := nsMap[m["name"].(string)].(map[string]interface{})["node"].([]interface{}) - if len(newNodeList) > 0 { - var nodes []interface{} - nodesStatus, err := fn(cloudConfigId, m["name"].(string), ClusterContext) - if err != nil { - return nil, err - } - for key, value := range nodesStatus { - for _, newNode := range newNodeList { - if newNode.(map[string]interface{})["node_id"] == key { - nodes = append(nodes, c.GetNodeValue(key, value.MaintenanceStatus.Action)) - } - } - } - if nodes != nil { - mPools[i].(map[string]interface{})["node"] = nodes - } - } - } - return mPools, nil -} diff --git a/spectrocloud/resource_cluster_aws.go b/spectrocloud/resource_cluster_aws.go index 7fb3d967..7f4696b4 100644 --- a/spectrocloud/resource_cluster_aws.go +++ b/spectrocloud/resource_cluster_aws.go @@ -3,7 +3,7 @@ package spectrocloud import ( "context" "log" - "sort" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -14,7 +14,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/palette-sdk-go/client" + + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" ) func resourceClusterAws() *schema.Resource { @@ -23,7 +24,6 @@ func resourceClusterAws() *schema.Resource { ReadContext: resourceClusterAwsRead, UpdateContext: resourceClusterAwsUpdate, DeleteContext: 
resourceClusterDelete, - Description: "Resource for managing AWS clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -38,12 +38,6 @@ func resourceClusterAws() *schema.Resource { Required: true, ForceNew: true, }, - "context": { - Type: schema.TypeString, - Optional: true, - Default: "project", - ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), - }, "tags": { Type: schema.TypeSet, Optional: true, @@ -51,9 +45,74 @@ func resourceClusterAws() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, - Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", }, - "cluster_profile": schemas.ClusterProfileSchema(), + "cluster_profile_id": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Switch to cluster_profile", + }, + "cluster_profile": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"cluster_profile_id", "pack"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Default: "spectro", + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + "manifest": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "content": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // UI strips the trailing newline on save + return strings.TrimSpace(old) == strings.TrimSpace(new) + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "apply_setting": { Type: schema.TypeString, Optional: true, @@ -64,33 +123,26 @@ func resourceClusterAws() *schema.Resource { ForceNew: true, }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the cloud config used for the cluster. This cloud config must be of type `aws`.", - Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", + Type: schema.TypeString, + Computed: true, }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether to apply OS patch on boot. Default is `false`.", + Type: schema.TypeBool, + Optional: true, }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, - Description: "The cron schedule for OS patching. This must be in the form of cron syntax. Ex: `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, - Description: "Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, - Description: "Kubeconfig for the cluster. 
This can be used to connect to the cluster using `kubectl`.", + Type: schema.TypeString, + Computed: true, }, "cloud_config": { Type: schema.TypeList, @@ -117,6 +169,30 @@ func resourceClusterAws() *schema.Resource { }, }, }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "machine_pool": { Type: schema.TypeSet, Required: true, @@ -130,28 +206,43 @@ func resourceClusterAws() *schema.Resource { Type: schema.TypeString, }, }, - "taints": schemas.ClusterTaintsSchema(), + "taints": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "effect": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "control_plane": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether this machine pool is a control plane. Defaults to `false`.", + Type: schema.TypeBool, + Optional: true, + Default: false, }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, - //ForceNew: true, - Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "name": { Type: schema.TypeString, Required: true, }, "count": { - Type: schema.TypeInt, - Required: true, - Description: "Number of nodes in the machine pool.", + Type: schema.TypeInt, + Required: true, }, "instance_type": { Type: schema.TypeString, @@ -185,11 +276,9 @@ func resourceClusterAws() *schema.Resource { Optional: true, }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", - Description: "Update strategy for the machine pool. 
Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", - ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", }, "disk_size_gb": { Type: schema.TypeInt, @@ -215,29 +304,140 @@ func resourceClusterAws() *schema.Resource { Required: true, }, }, - "additional_security_groups": { - Type: schema.TypeSet, - Set: schema.HashString, + }, + }, + }, + "backup_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Required: true, + }, + "backup_location_id": { + Type: schema.TypeString, + Required: true, + }, + "schedule": { + Type: schema.TypeString, + Required: true, + }, + "expiry_in_hour": { + Type: schema.TypeInt, + Required: true, + }, + "include_disks": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "include_cluster_resources": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "namespaces": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, Elem: &schema.Schema{ Type: schema.TypeString, }, - Optional: true, - Description: "Additional security groups to attach to the instance.", }, }, }, }, - "backup_policy": schemas.BackupPolicySchema(), - "scan_policy": schemas.ScanPolicySchema(), - "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), - "namespaces": schemas.ClusterNamespacesSchema(), - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchemaComputed(), + "scan_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "configuration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "penetration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "conformance_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "cluster_rbac_binding": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + "role": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "subjects": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "namespaces": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "resource_allocation": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchemaComputed(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If `true`, the cluster will be created asynchronously. 
Default value is `false`.", + Type: schema.TypeBool, + Optional: true, }, }, } @@ -249,18 +449,14 @@ func resourceClusterAwsCreate(ctx context.Context, d *schema.ResourceData, m int // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster, err := toAwsCluster(c, d) - if err != nil { - return diag.FromErr(err) - } + cluster := toAwsCluster(c, d) - ClusterContext := d.Get("context").(string) - uid, err := c.CreateClusterAws(cluster, ClusterContext) + uid, err := c.CreateClusterAws(cluster) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) if isError { return diagnostics } @@ -275,8 +471,10 @@ func resourceClusterAwsRead(_ context.Context, d *schema.ResourceData, m interfa c := m.(*client.V1Client) var diags diag.Diagnostics - - cluster, err := resourceClusterRead(d, c, diags) + // + uid := d.Id() + // + cluster, err := c.GetCluster(uid) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -294,11 +492,10 @@ func resourceClusterAwsRead(_ context.Context, d *schema.ResourceData, m interfa } func flattenCloudConfigAws(configUID string, d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { - ClusterContext := d.Get("context").(string) if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigAws(configUID, ClusterContext); err != nil { + if config, err := c.GetCloudConfigAws(configUID); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsAws(config.Spec.MachinePoolConfig) @@ -324,13 +521,12 @@ func flattenMachinePoolConfigsAws(machinePools []*models.V1AwsMachinePoolConfig) FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) FlattenControlPlaneAndRepaveInterval(machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) + oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name oi["count"] = int(machinePool.Size) flattenUpdateStrategy(machinePool.UpdateStrategy, oi) - oi["min"] = int(machinePool.MinSize) - oi["max"] = int(machinePool.MaxSize) oi["instance_type"] = machinePool.InstanceType if machinePool.CapacityType != nil { oi["capacity_type"] = machinePool.CapacityType @@ -344,36 +540,9 @@ func flattenMachinePoolConfigsAws(machinePools []*models.V1AwsMachinePoolConfig) } else { oi["azs"] = machinePool.Azs } - - if machinePool.AdditionalSecurityGroups != nil && len(machinePool.AdditionalSecurityGroups) > 0 { - additionalSecuritygroup := make([]string, 0) - for _, sg := range machinePool.AdditionalSecurityGroups { - additionalSecuritygroup = append(additionalSecuritygroup, sg.ID) - } - oi["additional_security_groups"] = additionalSecuritygroup - } - ois[i] = oi } - sort.SliceStable(ois, func(i, j int) bool { - var controlPlaneI, controlPlaneJ bool - if ois[i].(map[string]interface{})["control_plane"] != nil { - controlPlaneI = ois[i].(map[string]interface{})["control_plane"].(bool) - } - if ois[j].(map[string]interface{})["control_plane"] != nil { - controlPlaneJ = ois[j].(map[string]interface{})["control_plane"].(bool) - } - - // If both are control planes or both are not, sort by name - if controlPlaneI == controlPlaneJ { - return ois[i].(map[string]interface{})["name"].(string) < ois[j].(map[string]interface{})["name"].(string) - } - - // Otherwise, 
control planes come first - return controlPlaneI && !controlPlaneJ - }) - return ois } @@ -384,7 +553,7 @@ func resourceClusterAwsUpdate(ctx context.Context, d *schema.ResourceData, m int var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - ClusterContext := d.Get("context").(string) + if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") if oraw == nil { @@ -420,10 +589,10 @@ func resourceClusterAwsUpdate(ctx context.Context, d *schema.ResourceData, m int if oldMachinePool, ok := osMap[name]; !ok { log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolAws(cloudConfigId, machinePool, ClusterContext) + err = c.UpdateMachinePoolAws(cloudConfigId, machinePool) } else if hash != resourceMachinePoolAwsHash(oldMachinePool) { log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolAws(cloudConfigId, machinePool, ClusterContext) + err = c.UpdateMachinePoolAws(cloudConfigId, machinePool) } if err != nil { @@ -433,6 +602,6 @@ func resourceClusterAwsUpdate(ctx context.Context, d *schema.ResourceData, m int // Processed (if exists) delete(osMap, name) } } } @@ -441,11 +613,15 @@ func resourceClusterAwsUpdate(ctx context.Context, d *schema.ResourceData, m int machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolAws(cloudConfigId, name, ClusterContext); err != nil { + if err := c.DeleteMachinePoolAws(cloudConfigId, name); err != nil { return diag.FromErr(err) } } } + //TODO(saamalik) update for cluster as well + //if err := waitForClusterU(ctx, c, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + // return diag.FromErr(err) + //} diagnostics, done := updateCommonFields(d, c) if done { @@ -457,14 +633,10 @@ func resourceClusterAwsUpdate(ctx context.Context, d *schema.ResourceData, m int return diags } -func toAwsCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroAwsClusterEntity, error) { +func toAwsCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroAwsClusterEntity { // gnarly, I know! 
=/ cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) - profiles, err := toProfiles(c, d) - if err != nil { - return nil, err - } cluster := &models.V1SpectroAwsClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -473,7 +645,7 @@ func toAwsCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spectro }, Spec: &models.V1SpectroAwsClusterEntitySpec{ CloudAccountUID: types.Ptr(d.Get("cloud_account_id").(string)), - Profiles: profiles, + Profiles: toProfiles(c, d), Policies: toPolicies(d), CloudConfig: &models.V1AwsClusterConfig{ SSHKeyName: cloudConfig["ssh_key_name"].(string), @@ -483,32 +655,20 @@ func toAwsCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spectro }, } + //for _, machinePool := range d.Get("machine_pool").([]interface{}) { machinePoolConfigs := make([]*models.V1AwsMachinePoolConfigEntity, 0) for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { mp, err := toMachinePoolAws(machinePool, cluster.Spec.CloudConfig.VpcID) if err != nil { - return nil, err + return nil } machinePoolConfigs = append(machinePoolConfigs, mp) } - sort.SliceStable(machinePoolConfigs, func(i, j int) bool { - controlPlaneI := machinePoolConfigs[i].PoolConfig.IsControlPlane - controlPlaneJ := machinePoolConfigs[j].PoolConfig.IsControlPlane - - // If both are control planes or both are not, sort by name - if controlPlaneI == controlPlaneJ { - return *machinePoolConfigs[i].PoolConfig.Name < *machinePoolConfigs[j].PoolConfig.Name - } - - // Otherwise, control planes come first - return controlPlaneI && !controlPlaneJ - }) - cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster, nil + return cluster } func toMachinePoolAws(machinePool interface{}, vpcId string) (*models.V1AwsMachinePoolConfigEntity, error) { diff --git a/spectrocloud/resource_cluster_azure.go b/spectrocloud/resource_cluster_azure.go index 8214e430..5d44401e 100644 --- a/spectrocloud/resource_cluster_azure.go +++ b/spectrocloud/resource_cluster_azure.go @@ -2,19 +2,18 @@ package spectrocloud import ( "context" - "fmt" "log" + "strings" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" "github.com/spectrocloud/terraform-provider-spectrocloud/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/palette-sdk-go/client" + + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" ) func resourceClusterAzure() *schema.Resource { @@ -23,7 +22,6 @@ func resourceClusterAzure() *schema.Resource { ReadContext: resourceClusterAzureRead, UpdateContext: resourceClusterAzureUpdate, DeleteContext: resourceClusterDelete, - Description: "Resource for managing Azure clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -33,16 +31,9 @@ func resourceClusterAzure() *schema.Resource { Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the cluster. 
This name will be used to create the cluster in Azure.", - }, - "context": { - Type: schema.TypeString, - Optional: true, - Default: "project", - ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), + Type: schema.TypeString, + Required: true, + ForceNew: true, }, "tags": { Type: schema.TypeSet, @@ -51,48 +42,100 @@ func resourceClusterAzure() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, - Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", }, - "cluster_profile": schemas.ClusterProfileSchema(), + "cluster_profile": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"pack"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + //ForceNew: true, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Default: "spectro", + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + "manifest": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "content": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // UI strips the trailing newline on save + return strings.TrimSpace(old) == strings.TrimSpace(new) + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "apply_setting": { - Type: schema.TypeString, - Optional: true, - Description: "Apply setting for the cluster. This can be set to `on_create` or `on_update`.", + Type: schema.TypeString, + Optional: true, }, "cloud_account_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "ID of the cloud account to be used for the cluster. This cloud account must be of type `azure`.", + Type: schema.TypeString, + Required: true, + ForceNew: true, }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", - Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", + Type: schema.TypeString, + Computed: true, }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether to apply OS patch on boot. Default is `false`.", + Type: schema.TypeBool, + Optional: true, }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, - Description: "Cron schedule for OS patching. This must be in the form of `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, - Description: "Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, - Description: "Kubeconfig for the cluster. 
This can be used to connect to the cluster using `kubectl`.", + Type: schema.TypeString, + Computed: true, }, "cloud_config": { Type: schema.TypeList, @@ -102,24 +145,44 @@ func resourceClusterAzure() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "subscription_id": { - Type: schema.TypeString, - Required: true, - Description: "Azure subscription ID. This can be found in the Azure portal under `Subscriptions`.", + Type: schema.TypeString, + Required: true, }, "resource_group": { - Type: schema.TypeString, - Required: true, - Description: "Azure resource group. This can be found in the Azure portal under `Resource groups`.", + Type: schema.TypeString, + Required: true, }, "region": { - Type: schema.TypeString, - Required: true, - Description: "Azure region. This can be found in the Azure portal under `Resource groups`.", + Type: schema.TypeString, + Required: true, }, "ssh_key": { - Type: schema.TypeString, - Required: true, - Description: "SSH key to be used for the cluster nodes.", + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeString, + Required: true, }, }, }, @@ -137,31 +200,47 @@ func resourceClusterAzure() *schema.Resource { Type: schema.TypeString, }, }, - "taints": schemas.ClusterTaintsSchema(), + "taints": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "effect": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "control_plane": { Type: schema.TypeBool, Optional: true, Default: false, //ForceNew: true, - Description: "Whether this machine pool is a control plane. Defaults to `false`.", }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, + //ForceNew: true, - Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "name": { Type: schema.TypeString, Required: true, //ForceNew: true, - Description: "Name of the machine pool. This must be unique within the cluster.", }, "count": { - Type: schema.TypeInt, - Required: true, - Description: "Number of nodes in the machine pool.", + Type: schema.TypeInt, + Required: true, }, "node_repave_interval": { Type: schema.TypeInt, @@ -170,16 +249,13 @@ func resourceClusterAzure() *schema.Resource { Description: "Minimum number of seconds a node should be Ready before the next node is selected for repave. Default value is `0`. Applicable only for worker pools.", }, "instance_type": { - Type: schema.TypeString, - Required: true, - Description: "Azure instance type from the Azure portal.", + Type: schema.TypeString, + Required: true, }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", - Description: "Update strategy for the machine pool. 
Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", - ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", }, "disk": { Type: schema.TypeList, @@ -199,18 +275,15 @@ Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "size_gb": { - Type: schema.TypeInt, - Required: true, - Description: "Size of the disk in GB.", + Type: schema.TypeInt, + Required: true, }, "type": { - Type: schema.TypeString, - Required: true, - Description: "Type of the disk. Valid values are `Standard_LRS`, `StandardSSD_LRS`, `Premium_LRS`.", + Type: schema.TypeString, + Required: true, }, }, }, - Description: "Disk configuration for the machine pool.", }, "azs": { Type: schema.TypeSet, @@ -219,13 +292,10 @@ Elem: &schema.Schema{ Type: schema.TypeString, }, - Description: "Availability zones for the machine pool.", }, "is_system_node_pool": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether this machine pool is a system node pool. Default value is `false`.", + Type: schema.TypeBool, + Required: true, }, "os_type": { Type: schema.TypeString, @@ -233,25 +303,152 @@ DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { return false }, - Default: "Linux", - ValidateFunc: validation.StringInSlice([]string{"Linux", "Windows"}, false), - Description: "Operating system type for the machine pool. Valid values are `Linux` and `Windows`. Defaults to `Linux`.", }, }, }, - "backup_policy": schemas.BackupPolicySchema(), - "scan_policy": schemas.ScanPolicySchema(), - "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), - "namespaces": schemas.ClusterNamespacesSchema(), - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchemaComputed(), + "backup_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Required: true, + }, + "backup_location_id": { + Type: schema.TypeString, + Required: true, + }, + "schedule": { + Type: schema.TypeString, + Required: true, + }, + "expiry_in_hour": { + Type: schema.TypeInt, + Required: true, + }, + "include_disks": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "include_cluster_resources": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "namespaces": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "scan_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "configuration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "penetration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "conformance_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "cluster_rbac_binding": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + "role": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: 
schema.TypeString, + }, + }, + "subjects": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "namespaces": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "resource_allocation": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchemaComputed(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If `true`, the cluster will be created asynchronously. Default value is `false`.", + Type: schema.TypeBool, + Optional: true, }, + //"cloud_config": { + // Type: schema.TypeString, + // Required: true, + // //DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // // return false + // //}, + // //StateFunc: func(val interface{}) string { + // // return strings.ToLower(val.(string)) + // //}, + //}, }, } } @@ -262,22 +459,14 @@ func resourceClusterAzureCreate(ctx context.Context, d *schema.ResourceData, m i // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster, err := toAzureCluster(c, d) - if err != nil { - return diag.FromErr(err) - } - diags = validateMasterPoolCount(cluster.Spec.Machinepoolconfig) - if diags != nil { - return diags - } + cluster := toAzureCluster(c, d) - ClusterContext := d.Get("context").(string) - uid, err := c.CreateClusterAzure(cluster, ClusterContext) + uid, err := c.CreateClusterAzure(cluster) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) if isError { return diagnostics } @@ -293,7 +482,9 @@ func resourceClusterAzureRead(_ context.Context, d *schema.ResourceData, m inter var diags diag.Diagnostics - cluster, err := resourceClusterRead(d, c, diags) + uid := d.Id() + + cluster, err := c.GetCluster(uid) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -311,11 +502,10 @@ func resourceClusterAzureRead(_ context.Context, d *schema.ResourceData, m inter } func flattenCloudConfigAzure(configUID string, d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { - ClusterContext := d.Get("context").(string) if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigAzure(configUID, ClusterContext); err != nil { + if config, err := c.GetCloudConfigAzure(configUID); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsAzure(config.Spec.MachinePoolConfig) @@ -350,7 +540,7 @@ func flattenMachinePoolConfigsAzure(machinePools []*models.V1AzureMachinePoolCon oi["is_system_node_pool"] = machinePool.IsSystemNodePool oi["azs"] = machinePool.Azs - oi["os_type"] = machinePool.OsType + if machinePool.OsDisk != nil { d := make(map[string]interface{}) d["size_gb"] = machinePool.OsDisk.DiskSizeGB @@ -372,16 +562,8 @@ func resourceClusterAzureUpdate(ctx context.Context, d *schema.ResourceData, m i var diags diag.Diagnostics cloudConfigId := 
d.Get("cloud_config_id").(string) - ClusterContext := d.Get("context").(string) + if d.HasChange("machine_pool") { - cluster, err := toAzureCluster(c, d) - if err != nil { - return diag.FromErr(err) - } - diags = validateMasterPoolCount(cluster.Spec.Machinepoolconfig) - if diags != nil { - return diags - } oraw, nraw := d.GetChange("machine_pool") if oraw == nil { oraw = new(schema.Set) @@ -413,10 +595,10 @@ func resourceClusterAzureUpdate(ctx context.Context, d *schema.ResourceData, m i if oldMachinePool, ok := osMap[name]; !ok { log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolAzure(cloudConfigId, ClusterContext, machinePool) + err = c.CreateMachinePoolAzure(cloudConfigId, machinePool) } else if hash != resourceMachinePoolAzureHash(oldMachinePool) { log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolAzure(cloudConfigId, ClusterContext, machinePool) + err = c.UpdateMachinePoolAzure(cloudConfigId, machinePool) } if err != nil { @@ -426,6 +608,7 @@ func resourceClusterAzureUpdate(ctx context.Context, d *schema.ResourceData, m i // Processed (if exists) delete(osMap, name) } + } // Deleted old machine pools @@ -433,11 +616,15 @@ func resourceClusterAzureUpdate(ctx context.Context, d *schema.ResourceData, m i machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolAzure(cloudConfigId, name, ClusterContext); err != nil { + if err := c.DeleteMachinePoolAzure(cloudConfigId, name); err != nil { return diag.FromErr(err) } } } + //TODO(saamalik) update for cluster as well + //if err := waitForClusterU(ctx, c, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + // return diag.FromErr(err) + //} diagnostics, done := updateCommonFields(d, c) if done { @@ -449,15 +636,11 @@ func resourceClusterAzureUpdate(ctx context.Context, d *schema.ResourceData, m i return diags } -func toAzureCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroAzureClusterEntity, error) { +func toAzureCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroAzureClusterEntity { // gnarly, I know! 
=/ cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) //clientSecret := strfmt.Password(d.Get("azure_client_secret").(string)) - profiles, err := toProfiles(c, d) - if err != nil { - return nil, err - } cluster := &models.V1SpectroAzureClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -466,7 +649,7 @@ func toAzureCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spect }, Spec: &models.V1SpectroAzureClusterEntitySpec{ CloudAccountUID: types.Ptr(d.Get("cloud_account_id").(string)), - Profiles: profiles, + Profiles: toProfiles(c, d), Policies: toPolicies(d), CloudConfig: &models.V1AzureClusterConfig{ Location: types.Ptr(cloudConfig["region"].(string)), @@ -482,7 +665,7 @@ func toAzureCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spect for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { mp, err := toMachinePoolAzure(machinePool) if err != nil { - return nil, err + return nil } machinePoolConfigs = append(machinePoolConfigs, mp) } @@ -490,7 +673,7 @@ func toAzureCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spect cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster, nil + return cluster } func toMachinePoolAzure(machinePool interface{}) (*models.V1AzureMachinePoolConfigEntity, error) { @@ -570,14 +753,3 @@ func toMachinePoolAzure(machinePool interface{}) (*models.V1AzureMachinePoolConf return mp, nil } - -func validateMasterPoolCount(machinePool []*models.V1AzureMachinePoolConfigEntity) diag.Diagnostics { - for _, machineConfig := range machinePool { - if machineConfig.PoolConfig.IsControlPlane { - if *machineConfig.PoolConfig.Size%2 == 0 { - return diag.FromErr(fmt.Errorf("The master node pool size should be an odd number, 
but it is set to an even number '%d' in node pool '%s'.", *machineConfig.PoolConfig.Size, *machineConfig.PoolConfig.Name)) - } - } - return nil -} diff --git a/spectrocloud/resource_cluster_edge.go b/spectrocloud/resource_cluster_edge.go index 3665e532..e0c5e56a 100644 --- a/spectrocloud/resource_cluster_edge.go +++ b/spectrocloud/resource_cluster_edge.go @@ -2,16 +2,18 @@ package spectrocloud import ( "context" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" - "github.com/spectrocloud/terraform-provider-spectrocloud/types" "log" "sort" "strings" "time" + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" ) @@ -176,7 +178,7 @@ func resourceClusterEdge() *schema.Resource { "machine_pool": { Type: schema.TypeSet, Required: true, - Set: resourceMachinePoolEdgeHash, + Set: resourceMachinePoolEdgeNativeHash, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -457,7 +459,7 @@ func flattenMachinePoolConfigsEdge(machinePools []*models.V1EdgeMachinePoolConfi for _, machinePool := range machinePools { oi := make(map[string]interface{}) - SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker @@ -511,7 +513,7 @@ func resourceClusterEdgeUpdate(ctx context.Context, d *schema.ResourceData, m in if name == "" { continue } - hash := resourceMachinePoolEdgeHash(machinePoolResource) + hash := resourceMachinePoolEdgeNativeHash(machinePoolResource) machinePool := toMachinePoolEdge(machinePoolResource) @@ -519,7 +521,7 @@ func resourceClusterEdgeUpdate(ctx context.Context, d *schema.ResourceData, m in if oldMachinePool, ok := osMap[name]; !ok { log.Printf("Create machine pool %s", name) err = c.CreateMachinePoolEdge(cloudConfigId, machinePool) - } else if hash != resourceMachinePoolEdgeHash(oldMachinePool) { + } else if hash != resourceMachinePoolEdgeNativeHash(oldMachinePool) { log.Printf("Change in machine pool %s", name) err = c.UpdateMachinePoolEdge(cloudConfigId, machinePool) } diff --git a/spectrocloud/resource_cluster_edge_native.go b/spectrocloud/resource_cluster_edge_native.go index 9dea9b92..d548c930 100644 --- a/spectrocloud/resource_cluster_edge_native.go +++ b/spectrocloud/resource_cluster_edge_native.go @@ -2,15 +2,17 @@ package spectrocloud import ( "context" - schemas "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" - "github.com/spectrocloud/terraform-provider-spectrocloud/types" "log" "strings" "time" + schemas "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" ) @@ -479,7 +481,7 @@ func flattenMachinePoolConfigsEdgeNative(machinePools []*models.V1EdgeNativeMach for _, machinePool := range machinePools { oi := make(map[string]interface{}) - 
SetAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker diff --git a/spectrocloud/resource_cluster_edge_vsphere.go b/spectrocloud/resource_cluster_edge_vsphere.go index d6f864ee..7235cf03 100644 --- a/spectrocloud/resource_cluster_edge_vsphere.go +++ b/spectrocloud/resource_cluster_edge_vsphere.go @@ -3,17 +3,17 @@ package spectrocloud import ( "context" "log" + "strings" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" "github.com/spectrocloud/terraform-provider-spectrocloud/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/palette-sdk-go/client" + + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" ) func resourceClusterEdgeVsphere() *schema.Resource { @@ -35,12 +35,6 @@ func resourceClusterEdgeVsphere() *schema.Resource { Required: true, ForceNew: true, }, - "context": { - Type: schema.TypeString, - Optional: true, - Default: "project", - ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), - }, "edge_host_uid": { Type: schema.TypeString, Required: true, @@ -53,37 +47,90 @@ func resourceClusterEdgeVsphere() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, - Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", }, - "cluster_profile": schemas.ClusterProfileSchema(), + "cluster_profile": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"pack"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Default: "spectro", + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "tag": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + "manifest": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "content": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // UI strips the trailing newline on save + return strings.TrimSpace(old) == strings.TrimSpace(new) + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", - Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", + Type: schema.TypeString, + Computed: true, }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether to apply OS patch on boot. 
Default is `false`.", + Type: schema.TypeBool, + Optional: true, }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, - Description: "Cron schedule for OS patching. This must be in the form of `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, - Description: "Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, - Description: "Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.", + Type: schema.TypeString, + Computed: true, }, "cloud_config": { Type: schema.TypeList, @@ -95,6 +142,7 @@ func resourceClusterEdgeVsphere() *schema.Resource { "datacenter": { Type: schema.TypeString, Required: true, + ForceNew: true, }, "folder": { Type: schema.TypeString, @@ -109,36 +157,62 @@ func resourceClusterEdgeVsphere() *schema.Resource { Type: schema.TypeString, Optional: true, ExactlyOneOf: []string{"cloud_config.0.ssh_key", "cloud_config.0.ssh_keys"}, - Description: "SSH Key (Secure Shell) to establish, administer, and communicate with remote clusters, `ssh_key & ssh_keys` are mutually exclusive.", }, "ssh_keys": { - Type: schema.TypeSet, - Optional: true, - Set: schema.HashString, - ExactlyOneOf: []string{"cloud_config.0.ssh_key", "cloud_config.0.ssh_keys"}, + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, Elem: &schema.Schema{ Type: schema.TypeString, }, - Description: "List of SSH (Secure Shell) to establish, administer, and communicate with remote clusters, `ssh_key & ssh_keys` are mutually exclusive.", + ExactlyOneOf: []string{"cloud_config.0.ssh_key", "cloud_config.0.ssh_keys"}, }, "vip": { Type: schema.TypeString, Required: true, + ForceNew: true, }, "static_ip": { Type: schema.TypeBool, Optional: true, Default: false, + ForceNew: true, }, "network_type": { Type: schema.TypeString, Optional: true, + ForceNew: true, }, "network_search_domain": { Type: schema.TypeString, Optional: true, + ForceNew: true, + }, + }, + }, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeString, + Required: true, }, }, }, @@ -161,31 +235,44 @@ func resourceClusterEdgeVsphere() *schema.Resource { Type: schema.TypeString, }, }, - "taints": schemas.ClusterTaintsSchema(), + "taints": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "effect": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "control_plane": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether this machine pool is a control plane. Defaults to `false`.", + Type: schema.TypeBool, + Optional: true, + Default: false, }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, - //ForceNew: true, - Description: "Whether this machine pool is a control plane and a worker. 
Defaults to `false`.", }, "count": { - Type: schema.TypeInt, - Required: true, - Description: "Number of nodes in the machine pool.", + Type: schema.TypeInt, + Required: true, }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", - Description: "Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", - ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", }, "instance_type": { Type: schema.TypeList, @@ -243,17 +330,137 @@ func resourceClusterEdgeVsphere() *schema.Resource { }, }, }, - "backup_policy": schemas.BackupPolicySchema(), - "scan_policy": schemas.ScanPolicySchema(), - "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), - "namespaces": schemas.ClusterNamespacesSchema(), - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchema(), + "backup_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Required: true, + }, + "backup_location_id": { + Type: schema.TypeString, + Required: true, + }, + "schedule": { + Type: schema.TypeString, + Required: true, + }, + "expiry_in_hour": { + Type: schema.TypeInt, + Required: true, + }, + "include_disks": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "include_cluster_resources": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "namespaces": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "scan_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "configuration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "penetration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "conformance_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "cluster_rbac_binding": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + "role": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "subjects": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "namespaces": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "resource_allocation": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchema(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If `true`, the cluster will be created asynchronously. 
Default value is `false`.", + Type: schema.TypeBool, + Optional: true, }, }, } @@ -264,18 +471,14 @@ func resourceClusterEdgeVsphereCreate(ctx context.Context, d *schema.ResourceDat var diags diag.Diagnostics - cluster, err := toEdgeVsphereCluster(c, d) - if err != nil { - return diag.FromErr(err) - } + cluster := toEdgeVsphereCluster(c, d) - ClusterContext := d.Get("context").(string) - uid, err := c.CreateClusterEdgeVsphere(cluster, ClusterContext) + uid, err := c.CreateClusterEdgeVsphere(cluster) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) if isError { return diagnostics } @@ -290,11 +493,12 @@ func resourceClusterEdgeVsphereRead(_ context.Context, d *schema.ResourceData, m var diags diag.Diagnostics - cluster, err := resourceClusterRead(d, c, diags) + uid := d.Id() + + cluster, err := c.GetCluster(uid) if err != nil { return diag.FromErr(err) } else if cluster == nil { - // Deleted - Terraform will recreate it d.SetId("") return diags } @@ -311,8 +515,7 @@ func flattenCloudConfigEdgeVsphere(configUID string, d *schema.ResourceData, c * if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - ClusterContext := d.Get("context").(string) - if config, err := c.GetCloudConfigVsphere(configUID, ClusterContext); err != nil { + if config, err := c.GetCloudConfigVsphere(configUID); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsEdgeVsphere(config.Spec.MachinePoolConfig) @@ -382,7 +585,10 @@ func resourceClusterEdgeVsphereUpdate(ctx context.Context, d *schema.ResourceDat var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - ClusterContext := d.Get("context").(string) + _, err := c.GetCloudConfigEdgeVsphere(cloudConfigId) + if err != nil { + return diag.FromErr(err) + } if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") if oraw == nil { @@ -401,8 +607,11 @@ func resourceClusterEdgeVsphereUpdate(ctx context.Context, d *schema.ResourceDat osMap[machinePool["name"].(string)] = machinePool } + nsMap := make(map[string]interface{}) + for _, mp := range ns { machinePoolResource := mp.(map[string]interface{}) + nsMap[machinePoolResource["name"].(string)] = machinePoolResource // since known issue in TF SDK: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 if machinePoolResource["name"].(string) != "" { name := machinePoolResource["name"].(string) @@ -415,7 +624,7 @@ func resourceClusterEdgeVsphereUpdate(ctx context.Context, d *schema.ResourceDat if oldMachinePool, ok := osMap[name]; !ok { log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolVsphere(cloudConfigId, ClusterContext, machinePool) + err = c.CreateMachinePoolVsphere(cloudConfigId, machinePool) } else if hash != resourceMachinePoolVsphereHash(oldMachinePool) { log.Printf("Change in machine pool %s", name) oldMachinePool, _ := toMachinePoolEdgeVsphere(oldMachinePool) @@ -427,7 +636,7 @@ func resourceClusterEdgeVsphereUpdate(ctx context.Context, d *schema.ResourceDat } } - err = c.UpdateMachinePoolVsphere(cloudConfigId, ClusterContext, machinePool) + err = c.UpdateMachinePoolVsphere(cloudConfigId, machinePool) } if err != nil { @@ -442,7 +651,7 @@ func resourceClusterEdgeVsphereUpdate(ctx context.Context, d *schema.ResourceDat machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool 
%s", name) - if err := c.DeleteMachinePoolVsphere(cloudConfigId, name, ClusterContext); err != nil { + if err := c.DeleteMachinePoolVsphere(cloudConfigId, name); err != nil { return diag.FromErr(err) } } @@ -458,15 +667,11 @@ func resourceClusterEdgeVsphereUpdate(ctx context.Context, d *schema.ResourceDat return diags } -func toEdgeVsphereCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroVsphereClusterEntity, error) { +func toEdgeVsphereCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroVsphereClusterEntity { cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) vip := cloudConfig["vip"].(string) - profiles, err := toProfiles(c, d) - if err != nil { - return nil, err - } cluster := &models.V1SpectroVsphereClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -476,7 +681,8 @@ func toEdgeVsphereCluster(c *client.V1Client, d *schema.ResourceData) (*models.V Spec: &models.V1SpectroVsphereClusterEntitySpec{ EdgeHostUID: d.Get("edge_host_uid").(string), - Profiles: profiles, + + Profiles: toProfiles(c, d), Policies: toPolicies(d), CloudConfig: getClusterConfigEntity(cloudConfig), }, @@ -492,7 +698,7 @@ func toEdgeVsphereCluster(c *client.V1Client, d *schema.ResourceData) (*models.V for _, machinePool := range d.Get("machine_pool").([]interface{}) { mp, err := toMachinePoolEdgeVsphere(machinePool) if err != nil { - return nil, err + return nil } machinePoolConfigs = append(machinePoolConfigs, mp) } @@ -504,7 +710,7 @@ func toEdgeVsphereCluster(c *client.V1Client, d *schema.ResourceData) (*models.V cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster, nil + return cluster } func getSSHKey(cloudConfig map[string]interface{}) []string { @@ -528,7 +734,7 @@ func getImageTemplateFolder(cloudConfig map[string]interface{}) string { func getClusterConfigEntity(cloudConfig map[string]interface{}) *models.V1VsphereClusterConfigEntity { clusterConfigEntity := &models.V1VsphereClusterConfigEntity{ - NtpServers: toNtpServers(cloudConfig), + NtpServers: nil, Placement: &models.V1VspherePlacementConfigEntity{ Datacenter: cloudConfig["datacenter"].(string), Folder: cloudConfig["folder"].(string), diff --git a/spectrocloud/resource_cluster_eks.go b/spectrocloud/resource_cluster_eks.go index 173d7eba..76c012e0 100644 --- a/spectrocloud/resource_cluster_eks.go +++ b/spectrocloud/resource_cluster_eks.go @@ -2,16 +2,18 @@ package spectrocloud import ( "context" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" - "github.com/spectrocloud/terraform-provider-spectrocloud/types" "log" "strings" "time" + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/spectrocloud/hapi/models" + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" ) @@ -1045,3 +1047,18 @@ func toFargateProfileEks(fargateProfile interface{}) *models.V1FargateProfile { return f } + +func setAdditionalSecurityGroups(eksLaunchTemplate map[string]interface{}) []*models.V1AwsResourceReference { + if eksLaunchTemplate["additional_security_groups"] != nil { + securityGroups := expandStringList(eksLaunchTemplate["additional_security_groups"].(*schema.Set).List()) + 
additionalSecurityGroups := make([]*models.V1AwsResourceReference, 0) + for _, securityGroup := range securityGroups { + additionalSecurityGroups = append(additionalSecurityGroups, &models.V1AwsResourceReference{ + ID: securityGroup, + }) + } + return additionalSecurityGroups + } + + return nil +} diff --git a/spectrocloud/resource_cluster_gcp.go b/spectrocloud/resource_cluster_gcp.go index caad58ce..ff32a59b 100644 --- a/spectrocloud/resource_cluster_gcp.go +++ b/spectrocloud/resource_cluster_gcp.go @@ -3,17 +3,17 @@ package spectrocloud import ( "context" "log" + "strings" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" "github.com/spectrocloud/terraform-provider-spectrocloud/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/palette-sdk-go/client" + + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" ) func resourceClusterGcp() *schema.Resource { @@ -22,7 +22,6 @@ func resourceClusterGcp() *schema.Resource { ReadContext: resourceClusterGcpRead, UpdateContext: resourceClusterGcpUpdate, DeleteContext: resourceClusterDelete, - Description: "Resource for managing GCP clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -37,12 +36,6 @@ func resourceClusterGcp() *schema.Resource { Required: true, ForceNew: true, }, - "context": { - Type: schema.TypeString, - Optional: true, - Default: "project", - ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), - }, "tags": { Type: schema.TypeSet, Optional: true, @@ -50,9 +43,69 @@ func resourceClusterGcp() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, - Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", }, - "cluster_profile": schemas.ClusterProfileSchema(), + "cluster_profile": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"pack"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Default: "spectro", + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + "manifest": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "content": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // UI strips the trailing newline on save + return strings.TrimSpace(old) == strings.TrimSpace(new) + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "apply_setting": { Type: schema.TypeString, Optional: true, @@ -63,33 +116,26 @@ func resourceClusterGcp() *schema.Resource { ForceNew: true, }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the cloud config used for the cluster. 
This cloud config must be of type `azure`.", - Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", + Type: schema.TypeString, + Computed: true, }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether to apply OS patch on boot. Default is `false`.", + Type: schema.TypeBool, + Optional: true, }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, - Description: "Cron schedule for OS patching. This must be in the form of `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, - Description: "Date and time after which to patch cluster `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, - Description: "Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.", + Type: schema.TypeString, + Computed: true, }, "cloud_config": { Type: schema.TypeList, @@ -113,6 +159,30 @@ func resourceClusterGcp() *schema.Resource { }, }, }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "machine_pool": { Type: schema.TypeSet, Required: true, @@ -126,20 +196,38 @@ func resourceClusterGcp() *schema.Resource { Type: schema.TypeString, }, }, - "taints": schemas.ClusterTaintsSchema(), + "taints": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "effect": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "control_plane": { Type: schema.TypeBool, Optional: true, Default: false, //ForceNew: true, - Description: "Whether this machine pool is a control plane. Defaults to `false`.", }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, + //ForceNew: true, - Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "name": { Type: schema.TypeString, @@ -147,9 +235,8 @@ func resourceClusterGcp() *schema.Resource { //ForceNew: true, }, "count": { - Type: schema.TypeInt, - Required: true, - Description: "Number of nodes in the machine pool.", + Type: schema.TypeInt, + Required: true, }, "node_repave_interval": { Type: schema.TypeInt, @@ -162,11 +249,9 @@ func resourceClusterGcp() *schema.Resource { Required: true, }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", - Description: "Update strategy for the machine pool. 
Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", - ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", }, "disk_size_gb": { Type: schema.TypeInt, @@ -185,17 +270,137 @@ func resourceClusterGcp() *schema.Resource { }, }, }, - "backup_policy": schemas.BackupPolicySchema(), - "scan_policy": schemas.ScanPolicySchema(), - "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), - "namespaces": schemas.ClusterNamespacesSchema(), - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchemaComputed(), + "backup_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Required: true, + }, + "backup_location_id": { + Type: schema.TypeString, + Required: true, + }, + "schedule": { + Type: schema.TypeString, + Required: true, + }, + "expiry_in_hour": { + Type: schema.TypeInt, + Required: true, + }, + "include_disks": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "include_cluster_resources": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "namespaces": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "scan_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "configuration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "penetration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "conformance_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "cluster_rbac_binding": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + "role": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "subjects": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "namespaces": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "resource_allocation": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchemaComputed(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If `true`, the cluster will be created asynchronously. 
Default value is `false`.", + Type: schema.TypeBool, + Optional: true, }, }, } @@ -207,18 +412,14 @@ func resourceClusterGcpCreate(ctx context.Context, d *schema.ResourceData, m int // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster, err := toGcpCluster(c, d) - if err != nil { - return diag.FromErr(err) - } + cluster := toGcpCluster(c, d) - ClusterContext := d.Get("context").(string) - uid, err := c.CreateClusterGcp(cluster, ClusterContext) + uid, err := c.CreateClusterGcp(cluster) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) if isError { return diagnostics } @@ -234,7 +435,9 @@ func resourceClusterGcpRead(_ context.Context, d *schema.ResourceData, m interfa var diags diag.Diagnostics - cluster, err := resourceClusterRead(d, c, diags) + uid := d.Id() + + cluster, err := c.GetCluster(uid) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -252,11 +455,10 @@ func resourceClusterGcpRead(_ context.Context, d *schema.ResourceData, m interfa } func flattenCloudConfigGcp(configUID string, d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { - ClusterContext := d.Get("context").(string) if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigGcp(configUID, ClusterContext); err != nil { + if config, err := c.GetCloudConfigGcp(configUID); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsGcp(config.Spec.MachinePoolConfig) @@ -305,7 +507,7 @@ func resourceClusterGcpUpdate(ctx context.Context, d *schema.ResourceData, m int var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - ClusterContext := d.Get("context").(string) + if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") if oraw == nil { @@ -338,10 +540,10 @@ func resourceClusterGcpUpdate(ctx context.Context, d *schema.ResourceData, m int if oldMachinePool, ok := osMap[name]; !ok { log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolGcp(cloudConfigId, ClusterContext, machinePool) + err = c.CreateMachinePoolGcp(cloudConfigId, machinePool) } else if hash != resourceMachinePoolGcpHash(oldMachinePool) { log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolGcp(cloudConfigId, ClusterContext, machinePool) + err = c.UpdateMachinePoolGcp(cloudConfigId, machinePool) } if err != nil { @@ -351,6 +553,7 @@ func resourceClusterGcpUpdate(ctx context.Context, d *schema.ResourceData, m int // Processed (if exists) delete(osMap, name) } + } // Deleted old machine pools @@ -358,11 +561,15 @@ func resourceClusterGcpUpdate(ctx context.Context, d *schema.ResourceData, m int machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolGcp(cloudConfigId, name, ClusterContext); err != nil { + if err := c.DeleteMachinePoolGcp(cloudConfigId, name); err != nil { return diag.FromErr(err) } } } + //TODO(saamalik) update for cluster as well + //if err := waitForClusterU(ctx, c, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + // return diag.FromErr(err) + //} diagnostics, done := updateCommonFields(d, c) if done { @@ -374,15 +581,11 @@ func resourceClusterGcpUpdate(ctx context.Context, d *schema.ResourceData, m int return diags } -func toGcpCluster(c 
*client.V1Client, d *schema.ResourceData) (*models.V1SpectroGcpClusterEntity, error) { +func toGcpCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroGcpClusterEntity { // gnarly, I know! =/ cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) //clientSecret := strfmt.Password(d.Get("gcp_client_secret").(string)) - profiles, err := toProfiles(c, d) - if err != nil { - return nil, err - } cluster := &models.V1SpectroGcpClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -391,7 +594,7 @@ func toGcpCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spectro }, Spec: &models.V1SpectroGcpClusterEntitySpec{ CloudAccountUID: types.Ptr(d.Get("cloud_account_id").(string)), - Profiles: profiles, + Profiles: toProfiles(c, d), Policies: toPolicies(d), CloudConfig: &models.V1GcpClusterConfig{ Network: cloudConfig["network"].(string), @@ -405,7 +608,7 @@ func toGcpCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spectro for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { mp, err := toMachinePoolGcp(machinePool) if err != nil { - return nil, err + return nil } machinePoolConfigs = append(machinePoolConfigs, mp) } @@ -413,7 +616,7 @@ func toGcpCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spectro cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster, nil + return cluster } func toMachinePoolGcp(machinePool interface{}) (*models.V1GcpMachinePoolConfigEntity, error) { diff --git a/spectrocloud/resource_cluster_maas.go b/spectrocloud/resource_cluster_maas.go index bd6532b2..99f55011 100644 --- a/spectrocloud/resource_cluster_maas.go +++ b/spectrocloud/resource_cluster_maas.go @@ -3,17 +3,17 @@ package spectrocloud import ( "context" "log" + "strings" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" "github.com/spectrocloud/terraform-provider-spectrocloud/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/palette-sdk-go/client" + + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" ) func resourceClusterMaas() *schema.Resource { @@ -22,7 +22,6 @@ func resourceClusterMaas() *schema.Resource { ReadContext: resourceClusterMaasRead, UpdateContext: resourceClusterMaasUpdate, DeleteContext: resourceClusterDelete, - Description: "Resource for managing MAAS clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -37,12 +36,6 @@ func resourceClusterMaas() *schema.Resource { Required: true, ForceNew: true, }, - "context": { - Type: schema.TypeString, - Optional: true, - Default: "project", - ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), - }, "tags": { Type: schema.TypeSet, Optional: true, @@ -50,9 +43,68 @@ func resourceClusterMaas() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, - Description: "A list of tags to be applied to the cluster. 
Tags must be in the form of `key:value`.", }, - "cluster_profile": schemas.ClusterProfileSchema(), + "cluster_profile": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Default: "spectro", + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + "manifest": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "content": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // UI strips the trailing newline on save + return strings.TrimSpace(old) == strings.TrimSpace(new) + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "apply_setting": { Type: schema.TypeString, Optional: true, @@ -63,33 +115,26 @@ func resourceClusterMaas() *schema.Resource { ForceNew: true, }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", - Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", + Type: schema.TypeString, + Computed: true, }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether to apply OS patch on boot. Default is `false`.", + Type: schema.TypeBool, + Optional: true, }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, - Description: "Cron schedule for OS patching. This must be in the form of `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, - Description: "The date and time after which to patch the cluster. Prefix the time value with the respective RFC. Ex: `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, - Description: "Kubeconfig for the cluster. 
This can be used to connect to the cluster using `kubectl`.", + Type: schema.TypeString, + Computed: true, }, "cloud_config": { Type: schema.TypeList, @@ -105,6 +150,30 @@ func resourceClusterMaas() *schema.Resource { }, }, }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "machine_pool": { Type: schema.TypeSet, Required: true, @@ -118,20 +187,38 @@ func resourceClusterMaas() *schema.Resource { Type: schema.TypeString, }, }, - "taints": schemas.ClusterTaintsSchema(), + "taints": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "effect": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "control_plane": { Type: schema.TypeBool, Optional: true, Default: false, //ForceNew: true, - Description: "Whether this machine pool is a control plane. Defaults to `false`.", }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, + //ForceNew: true, - Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "name": { Type: schema.TypeString, @@ -177,11 +264,9 @@ func resourceClusterMaas() *schema.Resource { }, }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", - Description: "Update strategy for the machine pool. 
Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", - ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", }, "azs": { Type: schema.TypeSet, @@ -211,17 +296,137 @@ func resourceClusterMaas() *schema.Resource { }, }, }, - "backup_policy": schemas.BackupPolicySchema(), - "scan_policy": schemas.ScanPolicySchema(), - "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), - "namespaces": schemas.ClusterNamespacesSchema(), - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchema(), + "backup_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Required: true, + }, + "backup_location_id": { + Type: schema.TypeString, + Required: true, + }, + "schedule": { + Type: schema.TypeString, + Required: true, + }, + "expiry_in_hour": { + Type: schema.TypeInt, + Required: true, + }, + "include_disks": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "include_cluster_resources": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "namespaces": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "scan_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "configuration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "penetration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "conformance_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "cluster_rbac_binding": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + "role": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "subjects": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "namespaces": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "resource_allocation": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchema(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If `true`, the cluster will be created asynchronously. 
Default value is `false`.", + Type: schema.TypeBool, + Optional: true, }, }, } @@ -233,18 +438,14 @@ func resourceClusterMaasCreate(ctx context.Context, d *schema.ResourceData, m in // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster, err := toMaasCluster(c, d) - if err != nil { - return diag.FromErr(err) - } + cluster := toMaasCluster(c, d) - ClusterContext := d.Get("context").(string) - uid, err := c.CreateClusterMaas(cluster, ClusterContext) + uid, err := c.CreateClusterMaas(cluster) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) if isError { return diagnostics } @@ -259,8 +460,10 @@ func resourceClusterMaasRead(_ context.Context, d *schema.ResourceData, m interf c := m.(*client.V1Client) var diags diag.Diagnostics - - cluster, err := resourceClusterRead(d, c, diags) + // + uid := d.Id() + // + cluster, err := c.GetCluster(uid) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -278,12 +481,11 @@ func resourceClusterMaasRead(_ context.Context, d *schema.ResourceData, m interf } func flattenCloudConfigMaas(configUID string, d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { - ClusterContext := d.Get("context").(string) err := d.Set("cloud_config_id", configUID) if err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigMaas(configUID, ClusterContext); err != nil { + if config, err := c.GetCloudConfigMaas(configUID); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsMaas(config.Spec.MachinePoolConfig) @@ -314,8 +516,6 @@ func flattenMachinePoolConfigsMaas(machinePools []*models.V1MaasMachinePoolConfi oi["count"] = int(machinePool.Size) flattenUpdateStrategy(machinePool.UpdateStrategy, oi) - oi["min"] = int(machinePool.MinSize) - oi["max"] = int(machinePool.MaxSize) oi["instance_type"] = machinePool.InstanceType if machinePool.InstanceType != nil { @@ -341,7 +541,10 @@ func resourceClusterMaasUpdate(ctx context.Context, d *schema.ResourceData, m in var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - ClusterContext := d.Get("context").(string) + _, err := c.GetCloudConfigMaas(cloudConfigId) + if err != nil { + return diag.FromErr(err) + } if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") if oraw == nil { @@ -360,8 +563,11 @@ func resourceClusterMaasUpdate(ctx context.Context, d *schema.ResourceData, m in osMap[machinePool["name"].(string)] = machinePool } + nsMap := make(map[string]interface{}) + for _, mp := range ns.List() { machinePoolResource := mp.(map[string]interface{}) + nsMap[machinePoolResource["name"].(string)] = machinePoolResource // since known issue in TF SDK: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 if machinePoolResource["name"].(string) != "" { name := machinePoolResource["name"].(string) @@ -375,10 +581,10 @@ func resourceClusterMaasUpdate(ctx context.Context, d *schema.ResourceData, m in if oldMachinePool, ok := osMap[name]; !ok { log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolMaas(cloudConfigId, ClusterContext, machinePool) + err = c.CreateMachinePoolMaas(cloudConfigId, machinePool) } else if hash != resourceMachinePoolMaasHash(oldMachinePool) { log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolMaas(cloudConfigId, ClusterContext, machinePool) + err = 
c.UpdateMachinePoolMaas(cloudConfigId, machinePool) } if err != nil { @@ -396,7 +602,7 @@ func resourceClusterMaasUpdate(ctx context.Context, d *schema.ResourceData, m in machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolMaas(cloudConfigId, name, ClusterContext); err != nil { + if err := c.DeleteMachinePoolMaas(cloudConfigId, name); err != nil { return diag.FromErr(err) } } @@ -412,15 +618,11 @@ func resourceClusterMaasUpdate(ctx context.Context, d *schema.ResourceData, m in return diags } -func toMaasCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroMaasClusterEntity, error) { +func toMaasCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroMaasClusterEntity { // gnarly, I know! =/ cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) DomainVal := cloudConfig["domain"].(string) - profiles, err := toProfiles(c, d) - if err != nil { - return nil, err - } cluster := &models.V1SpectroMaasClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -429,7 +631,7 @@ func toMaasCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spectr }, Spec: &models.V1SpectroMaasClusterEntitySpec{ CloudAccountUID: types.Ptr(d.Get("cloud_account_id").(string)), - Profiles: profiles, + Profiles: toProfiles(c, d), Policies: toPolicies(d), CloudConfig: &models.V1MaasClusterConfig{ Domain: &DomainVal, @@ -442,7 +644,7 @@ func toMaasCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spectr for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { mp, err := toMachinePoolMaas(machinePool) if err != nil { - return nil, err + return nil } machinePoolConfigs = append(machinePoolConfigs, mp) } @@ -450,7 +652,7 @@ func toMaasCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spectr cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster, nil + return cluster } func toMachinePoolMaas(machinePool interface{}) (*models.V1MaasMachinePoolConfigEntity, error) { @@ -471,17 +673,6 @@ func toMachinePoolMaas(machinePool interface{}) (*models.V1MaasMachinePoolConfig InstanceType := m["instance_type"].([]interface{})[0].(map[string]interface{}) Placement := m["placement"].([]interface{})[0].(map[string]interface{}) log.Printf("Create machine pool %s", InstanceType) - - min := int32(m["count"].(int)) - max := int32(m["count"].(int)) - - if m["min"] != nil { - min = int32(m["min"].(int)) - } - - if m["max"] != nil { - max = int32(m["max"].(int)) - } mp := &models.V1MaasMachinePoolConfigEntity{ CloudConfig: &models.V1MaasMachinePoolCloudConfigEntity{ Azs: azs, @@ -502,8 +693,6 @@ func toMachinePoolMaas(machinePool interface{}) (*models.V1MaasMachinePoolConfig Type: getUpdateStrategy(m), }, UseControlPlaneAsWorker: controlPlaneAsWorker, - MinSize: min, - MaxSize: max, }, } diff --git a/spectrocloud/resource_cluster_openstack.go b/spectrocloud/resource_cluster_openstack.go index 1b2cfd51..4348daf8 100644 --- a/spectrocloud/resource_cluster_openstack.go +++ b/spectrocloud/resource_cluster_openstack.go @@ -4,17 +4,17 @@ import ( "context" "log" "sort" + "strings" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" "github.com/spectrocloud/terraform-provider-spectrocloud/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
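 	// Descriptive note (not in the original patch): imports are grouped per the
 	// goimports convention used across this changeset — terraform-plugin-sdk and
 	// hapi are third-party, while the provider-local pkg/client wrapper imported
 	// below supplies the V1Client used by the CRUD functions in this file.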
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/palette-sdk-go/client" + + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" ) func resourceClusterOpenStack() *schema.Resource { @@ -23,7 +23,6 @@ func resourceClusterOpenStack() *schema.Resource { ReadContext: resourceClusterOpenStackRead, UpdateContext: resourceClusterOpenStackUpdate, DeleteContext: resourceClusterDelete, - Description: "Resource for managing Openstack clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(180 * time.Minute), @@ -37,12 +36,6 @@ func resourceClusterOpenStack() *schema.Resource { Required: true, ForceNew: true, }, - "context": { - Type: schema.TypeString, - Optional: true, - Default: "project", - ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), - }, "tags": { Type: schema.TypeSet, Optional: true, @@ -50,9 +43,69 @@ func resourceClusterOpenStack() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, - Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", }, - "cluster_profile": schemas.ClusterProfileSchema(), + "cluster_profile": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"pack"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Default: "spectro", + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + "manifest": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "content": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // UI strips the trailing newline on save + return strings.TrimSpace(old) == strings.TrimSpace(new) + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "apply_setting": { Type: schema.TypeString, Optional: true, @@ -63,33 +116,26 @@ func resourceClusterOpenStack() *schema.Resource { ForceNew: true, }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", - Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", + Type: schema.TypeString, + Computed: true, }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether to apply OS patch on boot. Default is `false`.", + Type: schema.TypeBool, + Optional: true, }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, - Description: "Cron schedule for OS patching. This must be in the form of `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, - Description: "The date and time after which to patch the cluster. Prefix the time value with the respective RFC. 
Ex: `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, - Description: "Kubeconfig for the cluster. This can be used to connect to the cluster using `kubectl`.", + Type: schema.TypeString, + Computed: true, }, "cloud_config": { Type: schema.TypeList, @@ -138,6 +184,30 @@ func resourceClusterOpenStack() *schema.Resource { }, }, }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "machine_pool": { Type: schema.TypeList, Required: true, @@ -150,28 +220,43 @@ func resourceClusterOpenStack() *schema.Resource { Type: schema.TypeString, }, }, - "taints": schemas.ClusterTaintsSchema(), + "taints": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "effect": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "control_plane": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether this machine pool is a control plane. Defaults to `false`.", + Type: schema.TypeBool, + Optional: true, + Default: false, }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, - //ForceNew: true, - Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "name": { Type: schema.TypeString, Required: true, }, "count": { - Type: schema.TypeInt, - Required: true, - Description: "Number of nodes in the machine pool.", + Type: schema.TypeInt, + Required: true, }, "node_repave_interval": { Type: schema.TypeInt, @@ -180,11 +265,9 @@ func resourceClusterOpenStack() *schema.Resource { Description: "Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools.", }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", - Description: "Update strategy for the machine pool. 
Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", - ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", }, "instance_type": { Type: schema.TypeString, @@ -204,17 +287,137 @@ func resourceClusterOpenStack() *schema.Resource { }, }, }, - "backup_policy": schemas.BackupPolicySchema(), - "scan_policy": schemas.ScanPolicySchema(), - "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), - "namespaces": schemas.ClusterNamespacesSchema(), - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchema(), + "backup_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Required: true, + }, + "backup_location_id": { + Type: schema.TypeString, + Required: true, + }, + "schedule": { + Type: schema.TypeString, + Required: true, + }, + "expiry_in_hour": { + Type: schema.TypeInt, + Required: true, + }, + "include_disks": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "include_cluster_resources": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "namespaces": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "scan_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "configuration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "penetration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "conformance_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "cluster_rbac_binding": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + "role": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "subjects": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "namespaces": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "resource_allocation": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchema(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If `true`, the cluster will be created asynchronously. 
Default value is `false`.", + Type: schema.TypeBool, + Optional: true, }, }, } @@ -226,18 +429,14 @@ func resourceClusterOpenStackCreate(ctx context.Context, d *schema.ResourceData, // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster, err := toOpenStackCluster(c, d) - if err != nil { - return diag.FromErr(err) - } + cluster := toOpenStackCluster(c, d) - ClusterContext := d.Get("context").(string) - uid, err := c.CreateClusterOpenStack(cluster, ClusterContext) + uid, err := c.CreateClusterOpenStack(cluster) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) if isError { return diagnostics } @@ -247,14 +446,10 @@ func resourceClusterOpenStackCreate(ctx context.Context, d *schema.ResourceData, return diags } -func toOpenStackCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroOpenStackClusterEntity, error) { +func toOpenStackCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroOpenStackClusterEntity { cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) - profiles, err := toProfiles(c, d) - if err != nil { - return nil, err - } cluster := &models.V1SpectroOpenStackClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -263,7 +458,7 @@ func toOpenStackCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1S }, Spec: &models.V1SpectroOpenStackClusterEntitySpec{ CloudAccountUID: types.Ptr(d.Get("cloud_account_id").(string)), - Profiles: profiles, + Profiles: toProfiles(c, d), Policies: toPolicies(d), CloudConfig: &models.V1OpenStackClusterConfig{ Region: cloudConfig["region"].(string), @@ -300,7 +495,7 @@ func toOpenStackCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1S for _, machinePool := range d.Get("machine_pool").([]interface{}) { mp, err := toMachinePoolOpenStack(machinePool) if err != nil { - return nil, err + return nil } machinePoolConfigs = append(machinePoolConfigs, mp) } @@ -313,7 +508,7 @@ func toOpenStackCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1S cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster, nil + return cluster } //goland:noinspection GoUnhandledErrorResult @@ -322,7 +517,9 @@ func resourceClusterOpenStackRead(_ context.Context, d *schema.ResourceData, m i var diags diag.Diagnostics - cluster, err := resourceClusterRead(d, c, diags) + uid := d.Id() + + cluster, err := c.GetCluster(uid) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -335,8 +532,7 @@ func resourceClusterOpenStackRead(_ context.Context, d *schema.ResourceData, m i if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - ClusterContext := d.Get("context").(string) - if config, err := c.GetCloudConfigOpenStack(configUID, ClusterContext); err != nil { + if config, err := c.GetCloudConfigOpenStack(configUID); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsOpenStack(config.Spec.MachinePoolConfig) @@ -389,7 +585,10 @@ func resourceClusterOpenStackUpdate(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - ClusterContext := d.Get("context").(string) + _, err := c.GetCloudConfigOpenStack(cloudConfigId) + if err != nil { + return diag.FromErr(err) + } if 
d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") if oraw == nil { @@ -407,9 +606,10 @@ func resourceClusterOpenStackUpdate(ctx context.Context, d *schema.ResourceData, machinePool := mp.(map[string]interface{}) osMap[machinePool["name"].(string)] = machinePool } - + nsMap := make(map[string]interface{}) for _, mp := range ns { machinePoolResource := mp.(map[string]interface{}) + nsMap[machinePoolResource["name"].(string)] = machinePoolResource // since known issue in TF SDK: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 if machinePoolResource["name"].(string) != "" { name := machinePoolResource["name"].(string) @@ -423,10 +623,10 @@ func resourceClusterOpenStackUpdate(ctx context.Context, d *schema.ResourceData, if oldMachinePool, ok := osMap[name]; !ok { log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolOpenStack(cloudConfigId, ClusterContext, machinePool) + err = c.CreateMachinePoolOpenStack(cloudConfigId, machinePool) } else if hash != resourceMachinePoolOpenStackHash(oldMachinePool) { log.Printf("Change in machine pool %s", name) - err = c.UpdateMachinePoolOpenStack(cloudConfigId, ClusterContext, machinePool) + err = c.UpdateMachinePoolOpenStack(cloudConfigId, machinePool) } if err != nil { @@ -443,7 +643,7 @@ func resourceClusterOpenStackUpdate(ctx context.Context, d *schema.ResourceData, machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolOpenStack(cloudConfigId, name, ClusterContext); err != nil { + if err := c.DeleteMachinePoolOpenStack(cloudConfigId, name); err != nil { return diag.FromErr(err) } } diff --git a/spectrocloud/resource_cluster_vsphere.go b/spectrocloud/resource_cluster_vsphere.go index ba8eb33b..06818fee 100644 --- a/spectrocloud/resource_cluster_vsphere.go +++ b/spectrocloud/resource_cluster_vsphere.go @@ -2,21 +2,19 @@ package spectrocloud import ( "context" - "errors" - "fmt" "log" "sort" "strings" "time" + "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/spectrocloud/hapi/models" - "github.com/spectrocloud/palette-sdk-go/client" - "github.com/spectrocloud/terraform-provider-spectrocloud/spectrocloud/schemas" - "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" ) func resourceClusterVsphere() *schema.Resource { @@ -25,7 +23,6 @@ func resourceClusterVsphere() *schema.Resource { ReadContext: resourceClusterVsphereRead, UpdateContext: resourceClusterVsphereUpdate, DeleteContext: resourceClusterDelete, - Description: "A resource to manage a vSphere cluster in Pallette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(180 * time.Minute), @@ -35,16 +32,9 @@ func resourceClusterVsphere() *schema.Resource { Schema: map[string]*schema.Schema{ "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The name of the cluster.", - }, - "context": { - Type: schema.TypeString, - Optional: true, - Default: "project", - ValidateFunc: validation.StringInSlice([]string{"", "project", "tenant"}, false), + Type: schema.TypeString, + Required: true, + ForceNew: true, }, "tags": { Type: schema.TypeSet, @@ -53,52 
+43,104 @@ func resourceClusterVsphere() *schema.Resource { Elem: &schema.Schema{ Type: schema.TypeString, }, - Description: "A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.", }, - "cluster_profile": schemas.ClusterProfileSchema(), + "cluster_profile_id": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Switch to cluster_profile", + }, + "cluster_profile": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"cluster_profile_id", "pack"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Optional: true, + Default: "spectro", + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, + Optional: true, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + "manifest": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "content": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // UI strips the trailing newline on save + return strings.TrimSpace(old) == strings.TrimSpace(new) + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "apply_setting": { - Type: schema.TypeString, - Optional: true, - Default: "DownloadAndInstall", - ValidateFunc: validation.StringInSlice([]string{"DownloadAndInstall", "DownloadAndInstallLater"}, false), - Description: "The setting to apply the cluster profile. `DownloadAndInstall` will download and install packs in one action. " + - "`DownloadAndInstallLater` will only download artifact and postpone install for later. " + - "Default value is `DownloadAndInstall`.", + Type: schema.TypeString, + Optional: true, }, "cloud_account_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "ID of the cloud account to be used for the cluster. This cloud account must be of type `vsphere`.", + Type: schema.TypeString, + Required: true, + ForceNew: true, }, "cloud_config_id": { - Type: schema.TypeString, - Computed: true, - Description: "ID of the cloud config used for the cluster. This cloud config must be of type `azure`.", - Deprecated: "This field is deprecated and will be removed in the future. Use `cloud_config` instead.", + Type: schema.TypeString, + Computed: true, }, "os_patch_on_boot": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Whether to apply OS patch on boot. Default is `false`.", + Type: schema.TypeBool, + Optional: true, }, "os_patch_schedule": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchSchedule, - Description: "The cron schedule for OS patching. This must be in the form of cron syntax. Ex: `0 0 * * *`.", }, "os_patch_after": { Type: schema.TypeString, Optional: true, ValidateDiagFunc: validateOsPatchOnDemandAfter, - Description: "The date and time after which to patch the cluster. Prefix the time value with the respective RFC. Ex: `RFC3339: 2006-01-02T15:04:05Z07:00`", }, "kubeconfig": { - Type: schema.TypeString, - Computed: true, - Description: "Kubeconfig for the cluster. 
This can be used to connect to the cluster using `kubectl`.", + Type: schema.TypeString, + Computed: true, }, "cloud_config": { Type: schema.TypeList, @@ -108,54 +150,61 @@ func resourceClusterVsphere() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "datacenter": { - Type: schema.TypeString, - Required: true, - Description: "The name of the datacenter in vSphere. This is the name of the datacenter as it appears in vSphere.", + Type: schema.TypeString, + Required: true, }, "folder": { - Type: schema.TypeString, - Required: true, - Description: "The name of the folder in vSphere. This is the name of the folder as it appears in vSphere.", + Type: schema.TypeString, + Required: true, }, "image_template_folder": { - Type: schema.TypeString, - Optional: true, - Description: "The name of the image template folder in vSphere. This is the name of the folder as it appears in vSphere.", + Type: schema.TypeString, + Optional: true, }, "ssh_key": { - Type: schema.TypeString, - Required: true, - Description: "The SSH key to be used for the cluster. This is the public key that will be used to access the cluster.", + Type: schema.TypeString, + Required: true, }, "static_ip": { Type: schema.TypeBool, Optional: true, Default: false, - Description: "Whether to use static IP addresses for the cluster. If `true`, the cluster will use static IP addresses. " + - "If `false`, the cluster will use DDNS. Default is `false`.", }, // DHCP Properties "network_type": { - Type: schema.TypeString, - Optional: true, - Description: "The type of network to use for the cluster. This can be `VIP` or `DDNS`.", + Type: schema.TypeString, + Optional: true, }, "network_search_domain": { - Type: schema.TypeString, - Optional: true, - Description: "The search domain to use for the cluster in case of DHCP.", + Type: schema.TypeString, + Optional: true, }, - "ntp_servers": { - Type: schema.TypeSet, + }, + }, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "registry_uid": { + Type: schema.TypeString, Optional: true, - Set: schema.HashString, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "A list of NTP servers to be used by the cluster.", + }, + "tag": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeString, + Required: true, }, }, }, @@ -170,7 +219,6 @@ func resourceClusterVsphere() *schema.Resource { Type: schema.TypeString, Required: true, //ForceNew: true, - Description: "The name of the machine pool. This is used to identify the machine pool in the cluster.", }, "additional_labels": { Type: schema.TypeMap, @@ -179,20 +227,38 @@ func resourceClusterVsphere() *schema.Resource { Type: schema.TypeString, }, }, - "taints": schemas.ClusterTaintsSchema(), + "taints": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "effect": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "control_plane": { Type: schema.TypeBool, Optional: true, Default: false, //ForceNew: true, - Description: "Whether this machine pool is a control plane. 
Defaults to `false`.", }, "control_plane_as_worker": { Type: schema.TypeBool, Optional: true, Default: false, + //ForceNew: true, - Description: "Whether this machine pool is a control plane and a worker. Defaults to `false`.", }, "node_repave_interval": { Type: schema.TypeInt, @@ -201,16 +267,13 @@ func resourceClusterVsphere() *schema.Resource { Description: "Minimum number of seconds node should be Ready, before the next node is selected for repave. Default value is `0`, Applicable only for worker pools.", }, "count": { - Type: schema.TypeInt, - Required: true, - Description: "Number of nodes in the machine pool.", + Type: schema.TypeInt, + Required: true, }, "update_strategy": { - Type: schema.TypeString, - Optional: true, - Default: "RollingUpdateScaleOut", - Description: "Update strategy for the machine pool. Valid values are `RollingUpdateScaleOut` and `RollingUpdateScaleIn`.", - ValidateFunc: validation.StringInSlice([]string{"RollingUpdateScaleOut", "RollingUpdateScaleIn"}, false), + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", }, "instance_type": { Type: schema.TypeList, @@ -219,19 +282,16 @@ func resourceClusterVsphere() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "disk_size_gb": { - Type: schema.TypeInt, - Required: true, - Description: "The size of the disk in GB.", + Type: schema.TypeInt, + Required: true, }, "memory_mb": { - Type: schema.TypeInt, - Required: true, - Description: "The amount of memory in MB.", + Type: schema.TypeInt, + Required: true, }, "cpu": { - Type: schema.TypeInt, - Required: true, - Description: "The number of CPUs.", + Type: schema.TypeInt, + Required: true, }, }, }, @@ -246,29 +306,131 @@ func resourceClusterVsphere() *schema.Resource { Computed: true, }, "cluster": { - Type: schema.TypeString, - Required: true, - Description: "The name of the cluster to use for the machine pool. As it appears in the vSphere.", + Type: schema.TypeString, + Required: true, }, "resource_pool": { - Type: schema.TypeString, - Required: true, - Description: "The name of the resource pool to use for the machine pool. As it appears in the vSphere.", + Type: schema.TypeString, + Required: true, }, "datastore": { - Type: schema.TypeString, - Required: true, - Description: "The name of the datastore to use for the machine pool. As it appears in the vSphere.", + Type: schema.TypeString, + Required: true, }, "network": { - Type: schema.TypeString, - Required: true, - Description: "The name of the network to use for the machine pool. 
As it appears in the vSphere.", + Type: schema.TypeString, + Required: true, }, "static_ip_pool_id": { - Type: schema.TypeString, - Optional: true, - Description: "The ID of the static IP pool to use for the machine pool in case of static cluster placement.", + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "backup_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Required: true, + }, + "backup_location_id": { + Type: schema.TypeString, + Required: true, + }, + "schedule": { + Type: schema.TypeString, + Required: true, + }, + "expiry_in_hour": { + Type: schema.TypeInt, + Required: true, + }, + "include_disks": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "include_cluster_resources": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "namespaces": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "scan_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "configuration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "penetration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "conformance_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "cluster_rbac_binding": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + }, + "role": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "subjects": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, }, }, }, @@ -276,17 +438,30 @@ func resourceClusterVsphere() *schema.Resource { }, }, }, - "backup_policy": schemas.BackupPolicySchema(), - "scan_policy": schemas.ScanPolicySchema(), - "cluster_rbac_binding": schemas.ClusterRbacBindingSchema(), - "namespaces": schemas.ClusterNamespacesSchema(), - "host_config": schemas.ClusterHostConfigSchema(), - "location_config": schemas.ClusterLocationSchema(), + "namespaces": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "resource_allocation": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "host_config": schemas.ClusterHostConfigSchema(), + "location_config": schemas.ClusterLocationSchema(), "skip_completion": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If `true`, the cluster will be created asynchronously. 
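// Illustrative note, an assumption rather than part of this change: the
// "namespaces" entry that follows maps a namespace name to a
// resource_allocation map of resource quantities. In HCL a typical block
// might look like (the attribute keys shown are examples, not confirmed here):
//
//   namespaces {
//     name                = "default"
//     resource_allocation = {
//       cpu_cores  = "2"
//       memory_MiB = "2048"
//     }
//   }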
Default value is `false`.", + Type: schema.TypeBool, + Optional: true, }, }, } @@ -298,18 +473,14 @@ func resourceClusterVsphereCreate(ctx context.Context, d *schema.ResourceData, m // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster, err := toVsphereCluster(c, d) - if err != nil { - return diag.FromErr(err) - } + cluster := toVsphereCluster(c, d) - ClusterContext := d.Get("context").(string) - uid, err := c.CreateClusterVsphere(cluster, ClusterContext) + uid, err := c.CreateClusterVsphere(cluster) if err != nil { return diag.FromErr(err) } - diagnostics, isError := waitForClusterCreation(ctx, d, ClusterContext, uid, diags, c, true) + diagnostics, isError := waitForClusterCreation(ctx, d, uid, diags, c, true) if isError { return diagnostics } @@ -325,7 +496,9 @@ func resourceClusterVsphereRead(_ context.Context, d *schema.ResourceData, m int var diags diag.Diagnostics - cluster, err := resourceClusterRead(d, c, diags) + uid := d.Id() + + cluster, err := c.GetCluster(uid) if err != nil { return diag.FromErr(err) } else if cluster == nil { @@ -338,8 +511,7 @@ func resourceClusterVsphereRead(_ context.Context, d *schema.ResourceData, m int if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - ClusterContext := d.Get("context").(string) - if config, err := c.GetCloudConfigVsphere(configUID, ClusterContext); err != nil { + if config, err := c.GetCloudConfigVsphere(configUID); err != nil { return diag.FromErr(err) } else { mp := flattenMachinePoolConfigsVsphere(config.Spec.MachinePoolConfig) @@ -357,14 +529,13 @@ func resourceClusterVsphereRead(_ context.Context, d *schema.ResourceData, m int } func flattenCloudConfigVsphere(configUID string, d *schema.ResourceData, c *client.V1Client) diag.Diagnostics { - ClusterContext := d.Get("context").(string) if err := d.Set("cloud_config_id", configUID); err != nil { return diag.FromErr(err) } - if config, err := c.GetCloudConfigVsphere(configUID, ClusterContext); err != nil { + if config, err := c.GetCloudConfigVsphere(configUID); err != nil { return diag.FromErr(err) } else { - cloudConfig, err := c.GetCloudConfigVsphereValues(configUID, ClusterContext) + cloudConfig, err := c.GetVsphereClouldConfigValues(configUID) if err != nil { return diag.FromErr(err) } @@ -405,10 +576,6 @@ func flattenClusterConfigsVsphere(cloudConfig *models.V1VsphereCloudConfig) inte ret["network_search_domain"] = cpEndpoint.DdnsSearchDomain } - if cloudConfig.Spec.ClusterConfig.NtpServers != nil { - ret["ntp_servers"] = cloudConfig.Spec.ClusterConfig.NtpServers - } - cloudConfigFlatten = append(cloudConfigFlatten, ret) return cloudConfigFlatten @@ -467,80 +634,6 @@ func flattenMachinePoolConfigsVsphere(machinePools []*models.V1VsphereMachinePoo return ois } -func sortPlacementStructs(structs []interface{}) { - sort.Slice(structs, func(i, j int) bool { - clusterI := structs[i].(map[string]interface{})["cluster"] - clusterJ := structs[j].(map[string]interface{})["cluster"] - if clusterI != clusterJ { - return clusterI.(string) < clusterJ.(string) - } - datastoreI := structs[i].(map[string]interface{})["datastore"] - datastoreJ := structs[j].(map[string]interface{})["datastore"] - if datastoreI != datastoreJ { - return datastoreI.(string) < datastoreJ.(string) - } - resourcePoolI := structs[i].(map[string]interface{})["resource_pool"] - resourcePoolJ := structs[j].(map[string]interface{})["resource_pool"] - if resourcePoolI != resourcePoolJ { - return resourcePoolI.(string) < resourcePoolJ.(string) - } - 
networkI := structs[i].(map[string]interface{})["network"] - networkJ := structs[j].(map[string]interface{})["network"] - return networkI.(string) < networkJ.(string) - }) -} - -func ValidateMachinePoolChange(oMPool interface{}, nMPool interface{}) (bool, error) { - var oPlacements []interface{} - var nPlacements []interface{} - // Identifying control plane placements from machine pool interface before change - for i, oMachinePool := range oMPool.(*schema.Set).List() { - if oMachinePool.(map[string]interface{})["control_plane"] == true { - oPlacements = oMPool.(*schema.Set).List()[i].(map[string]interface{})["placement"].([]interface{}) - } - } - // Identifying control plane placements from machine pool interface after change - for _, nMachinePool := range nMPool.(*schema.Set).List() { - if nMachinePool.(map[string]interface{})["control_plane"] == true { - nPlacements = nMachinePool.(map[string]interface{})["placement"].([]interface{}) - } - } - // Validating any New or old placements got added/removed. - if len(nPlacements) != len(oPlacements) { - errMsg := `Placement validation error - Adding/Removing placement component in control plane is not allowed. -To update the placement configuration in the control plane, kindly recreate the cluster.` - return true, errors.New(errMsg) - } - - // Need to add sort with all fields - // oPlacements and nPlacements for correct comparison in case order was changed - sortPlacementStructs(oPlacements) - sortPlacementStructs(nPlacements) - - // Validating any New or old placements got changed. - for pIndex, nP := range nPlacements { - oPlacement := oPlacements[pIndex].(map[string]interface{}) - nPlacement := nP.(map[string]interface{}) - if oPlacement["cluster"] != nPlacement["cluster"] { - errMsg := fmt.Sprintf("Placement attributes for control_plane cannot be updated, validation error: Trying to update `ComputeCluster` value. Old value - %s, New value - %s ", oPlacement["cluster"], nPlacement["cluster"]) - return true, errors.New(errMsg) - } - if oPlacement["datastore"] != nPlacement["datastore"] { - errMsg := fmt.Sprintf("Placement attributes for control_plane cannot be updated, validation error: Trying to update `DataStore` value. Old value - %s, New value - %s ", oPlacement["datastore"], nPlacement["datastore"]) - return true, errors.New(errMsg) - } - if oPlacement["resource_pool"] != nPlacement["resource_pool"] { - errMsg := fmt.Sprintf("Placement attributes for control_plane cannot be updated, validation error: Trying to update `resource_pool` value. Old value - %s, New value - %s ", oPlacement["resource_pool"], nPlacement["resource_pool"]) - return true, errors.New(errMsg) - } - if oPlacement["network"] != nPlacement["network"] { - errMsg := fmt.Sprintf("Placement attributes for control_plane cannot be updated, validation error: Trying to update `Network` value. 
Old value - %s, New value - %s ", oPlacement["network"], nPlacement["network"]) - return true, errors.New(errMsg) - } - } - return false, nil -} - func resourceClusterVsphereUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { c := m.(*client.V1Client) @@ -548,25 +641,14 @@ func resourceClusterVsphereUpdate(ctx context.Context, d *schema.ResourceData, m var diags diag.Diagnostics cloudConfigId := d.Get("cloud_config_id").(string) - ClusterContext := d.Get("context").(string) + if d.HasChange("cloud_config") { - occ, ncc := d.GetChange("cloud_config") - if occ.([]interface{})[0].(map[string]interface{})["datacenter"] != ncc.([]interface{})[0].(map[string]interface{})["datacenter"] { - return diag.Errorf("Validation error: %s", "Datacenter value cannot be updated after cluster provisioning. Kindly destroy and recreate with updated Datacenter attribute.") - } cloudConfig := toCloudConfigUpdate(d.Get("cloud_config").([]interface{})[0].(map[string]interface{})) - if err := c.UpdateCloudConfigVsphereValues(cloudConfigId, ClusterContext, cloudConfig); err != nil { - return diag.FromErr(err) - } + c.UpdateVsphereCloudConfigValues(cloudConfigId, cloudConfig) } if d.HasChange("machine_pool") { oraw, nraw := d.GetChange("machine_pool") - if oraw != nil && nraw != nil { - if ok, err := ValidateMachinePoolChange(oraw, nraw); ok { - return diag.Errorf(err.Error()) - } - } if oraw == nil { oraw = new(schema.Set) } @@ -585,41 +667,38 @@ func resourceClusterVsphereUpdate(ctx context.Context, d *schema.ResourceData, m for _, mp := range ns.List() { machinePoolResource := mp.(map[string]interface{}) - if machinePoolResource["name"].(string) != "" { - name := machinePoolResource["name"].(string) - hash := resourceMachinePoolVsphereHash(machinePoolResource) - - var err error - machinePool, err := toMachinePoolVsphere(machinePoolResource) - if err != nil { - return diag.FromErr(err) - } + name := machinePoolResource["name"].(string) + hash := resourceMachinePoolVsphereHash(machinePoolResource) + var err error + machinePool, err := toMachinePoolVsphere(machinePoolResource) + if err != nil { + return diag.FromErr(err) + } - if oldMachinePool, ok := osMap[name]; !ok { - log.Printf("Create machine pool %s", name) - err = c.CreateMachinePoolVsphere(cloudConfigId, ClusterContext, machinePool) - } else if hash != resourceMachinePoolVsphereHash(oldMachinePool) { - log.Printf("Change in machine pool %s", name) - oldMachinePool, _ := toMachinePoolVsphere(oldMachinePool) - oldPlacements := oldMachinePool.CloudConfig.Placements - - // set the placement ids - for i, p := range machinePool.CloudConfig.Placements { - if len(oldPlacements) > i { - p.UID = oldPlacements[i].UID - } + if oldMachinePool, ok := osMap[name]; !ok { + log.Printf("Create machine pool %s", name) + err = c.CreateMachinePoolVsphere(cloudConfigId, machinePool) + } else if hash != resourceMachinePoolVsphereHash(oldMachinePool) { + log.Printf("Change in machine pool %s", name) + oldMachinePool, _ := toMachinePoolVsphere(oldMachinePool) + oldPlacements := oldMachinePool.CloudConfig.Placements + + // set the placement ids + for i, p := range machinePool.CloudConfig.Placements { + if len(oldPlacements) > i { + p.UID = oldPlacements[i].UID } - - err = c.UpdateMachinePoolVsphere(cloudConfigId, ClusterContext, machinePool) } - if err != nil { - return diag.FromErr(err) - } + err = c.UpdateMachinePoolVsphere(cloudConfigId, machinePool) + } - // Processed (if exists) - delete(osMap, name) + if err != nil { + return diag.FromErr(err) 
} + + // Processed (if exists) + delete(osMap, name) } // Deleted old machine pools @@ -627,11 +706,15 @@ func resourceClusterVsphereUpdate(ctx context.Context, d *schema.ResourceData, m machinePool := mp.(map[string]interface{}) name := machinePool["name"].(string) log.Printf("Deleted machine pool %s", name) - if err := c.DeleteMachinePoolVsphere(cloudConfigId, name, ClusterContext); err != nil { + if err := c.DeleteMachinePoolVsphere(cloudConfigId, name); err != nil { return diag.FromErr(err) } } } + //TODO(saamalik) update for cluster as well + //if err := waitForClusterU(ctx, c, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + // return diag.FromErr(err) + //} diagnostics, done := updateCommonFields(d, c) if done { @@ -643,14 +726,11 @@ func resourceClusterVsphereUpdate(ctx context.Context, d *schema.ResourceData, m return diags } -func toVsphereCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroVsphereClusterEntity, error) { +func toVsphereCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroVsphereClusterEntity { + // gnarly, I know! =/ cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) //clientSecret := strfmt.Password(d.Get("azure_client_secret").(string)) - profiles, err := toProfiles(c, d) - if err != nil { - return nil, err - } cluster := &models.V1SpectroVsphereClusterEntity{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), @@ -659,7 +739,7 @@ func toVsphereCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spe }, Spec: &models.V1SpectroVsphereClusterEntitySpec{ CloudAccountUID: d.Get("cloud_account_id").(string), - Profiles: profiles, + Profiles: toProfiles(c, d), Policies: toPolicies(d), CloudConfig: toCloudConfigCreate(cloudConfig), }, @@ -669,11 +749,12 @@ func toVsphereCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spe for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { mp, err := toMachinePoolVsphere(machinePool) if err != nil { - return nil, err + return nil } machinePoolConfigs = append(machinePoolConfigs, mp) } + // sort sort.SliceStable(machinePoolConfigs, func(i, j int) bool { return machinePoolConfigs[i].PoolConfig.IsControlPlane }) @@ -681,7 +762,7 @@ func toVsphereCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1Spe cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster, nil + return cluster } func toCloudConfigCreate(cloudConfig map[string]interface{}) *models.V1VsphereClusterConfigEntity { @@ -701,7 +782,7 @@ func toCloudConfigCreate(cloudConfig map[string]interface{}) *models.V1VsphereCl func toCloudConfigUpdate(cloudConfig map[string]interface{}) *models.V1VsphereCloudClusterConfigEntity { return &models.V1VsphereCloudClusterConfigEntity{ - ClusterConfig: toCloudConfigCreate(cloudConfig), + toCloudConfigCreate(cloudConfig), } } From 913986368d9a4c2965c09583ad840351dc4e9e0b Mon Sep 17 00:00:00 2001 From: nikolay-spectro Date: Fri, 15 Sep 2023 23:53:17 -0700 Subject: [PATCH 3/4] PLT-587: adding node repave interval support for libvirt and edge_vsphere clouds. 
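The machine-pool hunks in both files delegate the control-plane check to the shared helper ValidationNodeRepaveIntervalForControlPlane, which presumably lives with the rest of the node-repave plumbing in spectrocloud/cluster_node_common.go. A minimal sketch of the behavior these hunks rely on (the provider's actual implementation and error wording may differ):

package spectrocloud

import "fmt"

// Sketch only: control-plane machine pools must not set a non-zero
// node_repave_interval; worker pools pass their interval through unchanged.
func ValidationNodeRepaveIntervalForControlPlane(nodeRepaveInterval int) error {
	if nodeRepaveInterval != 0 {
		return fmt.Errorf("validation error: `node_repave_interval` is only applicable to worker pools, got %d", nodeRepaveInterval)
	}
	return nil
}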
--- spectrocloud/resource_cluster_edge_vsphere.go | 21 +++++++++++++++++++ spectrocloud/resource_cluster_libvirt.go | 21 +++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/spectrocloud/resource_cluster_edge_vsphere.go b/spectrocloud/resource_cluster_edge_vsphere.go index 7235cf03..f98d42a6 100644 --- a/spectrocloud/resource_cluster_edge_vsphere.go +++ b/spectrocloud/resource_cluster_edge_vsphere.go @@ -269,6 +269,12 @@ func resourceClusterEdgeVsphere() *schema.Resource { Type: schema.TypeInt, Required: true, }, + "node_repave_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Minimum number of seconds a node should be Ready before the next node is selected for repave. Default value is `0`. Applicable only to worker pools.", + }, "update_strategy": { Type: schema.TypeString, Optional: true, @@ -539,6 +545,7 @@ func flattenMachinePoolConfigsEdgeVsphere(machinePools []*models.V1VsphereMachin oi := make(map[string]interface{}) FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenControlPlaneAndRepaveInterval(machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name @@ -805,5 +812,19 @@ func toMachinePoolEdgeVsphere(machinePool interface{}) (*models.V1VsphereMachine }, } + if !controlPlane { + nodeRepaveInterval := 0 + if m["node_repave_interval"] != nil { + nodeRepaveInterval = m["node_repave_interval"].(int) + } + mp.PoolConfig.NodeRepaveInterval = int32(nodeRepaveInterval) + } else { + err := ValidationNodeRepaveIntervalForControlPlane(m["node_repave_interval"].(int)) + if err != nil { + return mp, err + } + + } + return mp, nil } diff --git a/spectrocloud/resource_cluster_libvirt.go b/spectrocloud/resource_cluster_libvirt.go index 3e8f5d5a..bb6ece54 100644 --- a/spectrocloud/resource_cluster_libvirt.go +++ b/spectrocloud/resource_cluster_libvirt.go @@ -274,6 +274,12 @@ func resourceClusterLibvirt() *schema.Resource { Type: schema.TypeInt, Required: true, }, + "node_repave_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Minimum number of seconds a node should be Ready before the next node is selected for repave.
Default value is `0`. Applicable only to worker pools.", + }, "update_strategy": { Type: schema.TypeString, Optional: true, @@ -638,6 +644,7 @@ func flattenMachinePoolConfigsLibvirt(machinePools []*models.V1LibvirtMachinePoo oi := make(map[string]interface{}) FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) + FlattenControlPlaneAndRepaveInterval(&machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name @@ -949,6 +956,20 @@ func toMachinePoolLibvirt(machinePool interface{}) (*models.V1LibvirtMachinePool }, } + if !controlPlane { + nodeRepaveInterval := 0 + if m["node_repave_interval"] != nil { + nodeRepaveInterval = m["node_repave_interval"].(int) + } + mp.PoolConfig.NodeRepaveInterval = int32(nodeRepaveInterval) + } else { + err := ValidationNodeRepaveIntervalForControlPlane(m["node_repave_interval"].(int)) + if err != nil { + return mp, err + } + + } + return mp, nil } From fd5b93d452ce84f326569f9bf33f59b6c6825b82 Mon Sep 17 00:00:00 2001 From: Sivaanand Murugesan Date: Sun, 17 Sep 2023 20:24:00 +0530 Subject: [PATCH 4/4] Added node_repave_interval support for edge and edge_native --- spectrocloud/resource_cluster_edge.go | 47 ++++++++++++++++---- spectrocloud/resource_cluster_edge_native.go | 36 ++++++++++++--- 2 files changed, 68 insertions(+), 15 deletions(-) diff --git a/spectrocloud/resource_cluster_edge.go b/spectrocloud/resource_cluster_edge.go index e0c5e56a..30573fd3 100644 --- a/spectrocloud/resource_cluster_edge.go +++ b/spectrocloud/resource_cluster_edge.go @@ -213,6 +213,12 @@ func resourceClusterEdge() *schema.Resource { }, }, }, + "node_repave_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Minimum number of seconds a node should be Ready before the next node is selected for repave.
Default value is `0`. Applicable only to worker pools.", + }, "control_plane": { Type: schema.TypeBool, Optional: true, @@ -393,7 +399,10 @@ func resourceClusterEdgeCreate(ctx context.Context, d *schema.ResourceData, m in // Warning or errors can be collected in a slice type var diags diag.Diagnostics - cluster := toEdgeCluster(c, d) + cluster, err := toEdgeCluster(c, d) + if err != nil { + return diag.FromErr(err) + } uid, err := c.CreateClusterEdge(cluster) if err != nil { @@ -460,7 +469,7 @@ func flattenMachinePoolConfigsEdge(machinePools []*models.V1EdgeMachinePoolConfi oi := make(map[string]interface{}) FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) - + FlattenControlPlaneAndRepaveInterval(&machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name @@ -515,9 +524,13 @@ func resourceClusterEdgeUpdate(ctx context.Context, d *schema.ResourceData, m in } hash := resourceMachinePoolEdgeNativeHash(machinePoolResource) - machinePool := toMachinePoolEdge(machinePoolResource) - var err error + + machinePool, err := toMachinePoolEdge(machinePoolResource) + if err != nil { + return diag.FromErr(err) + } + if oldMachinePool, ok := osMap[name]; !ok { log.Printf("Create machine pool %s", name) err = c.CreateMachinePoolEdge(cloudConfigId, machinePool) @@ -555,7 +568,7 @@ func resourceClusterEdgeUpdate(ctx context.Context, d *schema.ResourceData, m in return diags } -func toEdgeCluster(c *client.V1Client, d *schema.ResourceData) *models.V1SpectroEdgeClusterEntity { +func toEdgeCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1SpectroEdgeClusterEntity, error) { cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) cluster := &models.V1SpectroEdgeClusterEntity{ @@ -575,7 +588,10 @@ func toEdgeCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Spectro machinePoolConfigs := make([]*models.V1EdgeMachinePoolConfigEntity, 0) for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { - mp := toMachinePoolEdge(machinePool) + mp, err := toMachinePoolEdge(machinePool) + if err != nil { + return nil, err + } machinePoolConfigs = append(machinePoolConfigs, mp) } @@ -587,10 +603,10 @@ func toEdgeCluster(c *client.V1Client, d *schema.ResourceData) *models.V1Spectro cluster.Spec.Machinepoolconfig = machinePoolConfigs cluster.Spec.ClusterConfig = toClusterConfig(d) - return cluster + return cluster, nil } -func toMachinePoolEdge(machinePool interface{}) *models.V1EdgeMachinePoolConfigEntity { +func toMachinePoolEdge(machinePool interface{}) (*models.V1EdgeMachinePoolConfigEntity, error) { m := machinePool.(map[string]interface{}) labels := make([]string, 0) @@ -627,5 +643,18 @@ func toMachinePoolEdge(machinePool interface{}) *models.V1EdgeMachinePoolConfigE UseControlPlaneAsWorker: controlPlaneAsWorker, }, } - return mp + if !controlPlane { + nodeRepaveInterval := 0 + if m["node_repave_interval"] != nil { + nodeRepaveInterval = m["node_repave_interval"].(int) + } + mp.PoolConfig.NodeRepaveInterval = int32(nodeRepaveInterval) + } else { + err := ValidationNodeRepaveIntervalForControlPlane(m["node_repave_interval"].(int)) + if err != nil { + return mp, err + } + } + + return mp, nil } diff --git a/spectrocloud/resource_cluster_edge_native.go b/spectrocloud/resource_cluster_edge_native.go index 5c71330f..1eddcb7f 100644 ---
a/spectrocloud/resource_cluster_edge_native.go +++ b/spectrocloud/resource_cluster_edge_native.go @@ -242,6 +242,12 @@ func resourceClusterEdgeNative() *schema.Resource { }, }, }, + "node_repave_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: "Minimum number of seconds a node should be Ready before the next node is selected for repave. Default value is `0`. Applicable only to worker pools.", + }, "control_plane": { Type: schema.TypeBool, Optional: true, @@ -487,7 +493,7 @@ func flattenMachinePoolConfigsEdgeNative(machinePools []*models.V1EdgeNativeMach oi := make(map[string]interface{}) FlattenAdditionalLabelsAndTaints(machinePool.AdditionalLabels, machinePool.Taints, oi) - + FlattenControlPlaneAndRepaveInterval(&machinePool.IsControlPlane, oi, machinePool.NodeRepaveInterval) oi["control_plane"] = machinePool.IsControlPlane oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker oi["name"] = machinePool.Name @@ -538,9 +544,12 @@ func resourceClusterEdgeNativeUpdate(ctx context.Context, d *schema.ResourceData } hash := resourceMachinePoolEdgeNativeHash(machinePoolResource) - machinePool := toMachinePoolEdgeNative(machinePoolResource) - var err error + machinePool, err := toMachinePoolEdgeNative(machinePoolResource) + if err != nil { + return diag.FromErr(err) + } + if oldMachinePool, ok := osMap[name]; !ok { log.Printf("Create machine pool %s", name) err = c.CreateMachinePoolEdgeNative(cloudConfigId, machinePool) @@ -610,7 +619,10 @@ func toEdgeNativeCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1 machinePoolConfigs := make([]*models.V1EdgeNativeMachinePoolConfigEntity, 0) for _, machinePool := range d.Get("machine_pool").(*schema.Set).List() { - mp := toMachinePoolEdgeNative(machinePool) + mp, err := toMachinePoolEdgeNative(machinePool) + if err != nil { + return nil, err + } machinePoolConfigs = append(machinePoolConfigs, mp) } cluster.Spec.Machinepoolconfig = machinePoolConfigs @@ -619,7 +631,7 @@ func toEdgeNativeCluster(c *client.V1Client, d *schema.ResourceData) (*models.V1 return cluster, nil } -func toMachinePoolEdgeNative(machinePool interface{}) *models.V1EdgeNativeMachinePoolConfigEntity { +func toMachinePoolEdgeNative(machinePool interface{}) (*models.V1EdgeNativeMachinePoolConfigEntity, error) { m := machinePool.(map[string]interface{}) labels := make([]string, 0) @@ -642,7 +654,19 @@ func toMachinePoolEdgeNative(machinePool interface{}) *models.V1EdgeNativeMachin UseControlPlaneAsWorker: controlPlaneAsWorker, }, } - return mp + if !controlPlane { + nodeRepaveInterval := 0 + if m["node_repave_interval"] != nil { + nodeRepaveInterval = m["node_repave_interval"].(int) + } + mp.PoolConfig.NodeRepaveInterval = int32(nodeRepaveInterval) + } else { + err := ValidationNodeRepaveIntervalForControlPlane(m["node_repave_interval"].(int)) + if err != nil { + return mp, err + } + } + return mp, nil } func toEdgeHosts(m map[string]interface{}) *models.V1EdgeNativeMachinePoolCloudConfigEntity {
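// Worked example (hypothetical values, not part of this patch): with the
// guard above, a Terraform machine_pool such as
//
//   machine_pool {
//     name                 = "worker-basic"
//     count                = 2
//     control_plane        = false
//     node_repave_interval = 30
//   }
//
// carries 30 through to PoolConfig.NodeRepaveInterval, while the same
// attribute on a pool with control_plane = true is rejected by
// ValidationNodeRepaveIntervalForControlPlane.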