From bee2723bfc15bf362956dde052e616c2afe7f49c Mon Sep 17 00:00:00 2001
From: Fabian Mettler
Date: Sun, 17 Dec 2023 00:46:05 +0100
Subject: [PATCH 1/4] storage: Introduce Storage Bucket management

Signed-off-by: Fabian Mettler
(cherry picked from commit 9a7fb65aaa3fa4510843de18b6298bbac3cc2e0e)
Signed-off-by: Din Music
---
 docs/resources/storage_bucket.md              |  81 +++++
 docs/resources/storage_bucket_key.md          |  92 +++++
 internal/provider/provider.go                 |   2 +
 internal/storage/resource_storage_bucket.go   | 331 +++++++++++++++++
 .../storage/resource_storage_bucket_key.go    | 344 ++++++++++++++++++
 .../resource_storage_bucket_key_test.go       | 236 ++++++++++++
 .../storage/resource_storage_bucket_test.go   | 162 +++++++++
 7 files changed, 1248 insertions(+)
 create mode 100644 docs/resources/storage_bucket.md
 create mode 100644 docs/resources/storage_bucket_key.md
 create mode 100644 internal/storage/resource_storage_bucket.go
 create mode 100644 internal/storage/resource_storage_bucket_key.go
 create mode 100644 internal/storage/resource_storage_bucket_key_test.go
 create mode 100644 internal/storage/resource_storage_bucket_test.go

diff --git a/docs/resources/storage_bucket.md b/docs/resources/storage_bucket.md
new file mode 100644
index 00000000..9f67d198
--- /dev/null
+++ b/docs/resources/storage_bucket.md
@@ -0,0 +1,81 @@
+# lxd_storage_bucket
+
+Manages an LXD storage bucket.
+
+## Example Usage
+
+```hcl
+resource "lxd_storage_pool" "pool" {
+  name   = "mypool"
+  driver = "zfs"
+}
+
+resource "lxd_storage_bucket" "bucket" {
+  name = "mybucket"
+  pool = lxd_storage_pool.pool.name
+}
+```
+
+## Argument Reference
+
+* `name` - **Required** - Name of the storage bucket.
+
+* `pool` - **Required** - Name of the storage pool to host the storage bucket.
+
+* `description` - *Optional* - Description of the storage bucket.
+
+* `config` - *Optional* - Map of key/value pairs of
+  [storage bucket config settings](https://documentation.ubuntu.com/lxd/en/latest/howto/storage_buckets/#configure-storage-bucket-settings).
+  Note that the available config settings vary depending on the storage pool driver.
+
+* `project` - *Optional* - Name of the project where the storage bucket will be stored.
+
+* `remote` - *Optional* - The remote in which the resource will be created. If
+  not provided, the provider's default remote will be used.
+
+* `target` - *Optional* - Specify a target node in a cluster.
+
+## Attribute Reference
+
+The following attributes are exported:
+
+* `location` - Name of the node where the storage bucket was created.
+
+## Importing
+
+Import ID syntax: `[<remote>:][<project>]/<pool>/<name>`
+
+* `<remote>` - *Optional* - Remote name.
+* `<project>` - *Optional* - Project name.
+* `<pool>` - **Required** - Storage pool name.
+* `<name>` - **Required** - Storage bucket name.
+
+### Import example
+
+Example using the terraform import command:
+
+```shell
+$ terraform import lxd_storage_bucket.bucket proj/mypool/mybucket
+```
+
+Example using the import block (only available in Terraform v1.5.0 and later):
+
+```hcl
+resource "lxd_storage_bucket" "bucket" {
+  name    = "mybucket"
+  pool    = "mypool"
+  project = "proj"
+}
+
+import {
+  to = lxd_storage_bucket.bucket
+  id = "proj/mypool/mybucket"
+}
+```
+
+## Notes
+
+* By default, LXD creates each storage bucket with an admin access key and a secret key.
+  Those keys can be imported using the `lxd_storage_bucket_key` resource.
+
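The note above says the auto-created admin key can be adopted with `lxd_storage_bucket_key`. A minimal sketch of doing so with an import block, assuming the auto-created key is named `admin` (that name, and the `mypool`/`mybucket` identifiers, are illustrative assumptions, not confirmed by this patch):

```hcl
# Hypothetical adoption of the bucket's auto-created admin key.
# Assumes LXD names that key "admin".
resource "lxd_storage_bucket_key" "admin" {
  name   = "admin"
  pool   = lxd_storage_bucket.bucket.pool
  bucket = lxd_storage_bucket.bucket.name
}

import {
  to = lxd_storage_bucket_key.admin
  id = "mypool/mybucket/admin"
}
```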
diff --git a/docs/resources/storage_bucket_key.md b/docs/resources/storage_bucket_key.md
new file mode 100644
index 00000000..1e4659f7
--- /dev/null
+++ b/docs/resources/storage_bucket_key.md
@@ -0,0 +1,92 @@
+# lxd_storage_bucket_key
+
+Manages an LXD storage bucket key.
+
+~> **Warning:** The exported attributes `access_key` and `secret_key` are stored in the Terraform state as plain text.
+   Read more about [sensitive data in state](https://www.terraform.io/language/state/sensitive-data).
+
+## Example Usage
+
+```hcl
+resource "lxd_storage_pool" "pool" {
+  name   = "mypool"
+  driver = "zfs"
+}
+
+resource "lxd_storage_bucket" "bucket" {
+  name = "mybucket"
+  pool = lxd_storage_pool.pool.name
+}
+
+resource "lxd_storage_bucket_key" "key" {
+  name   = "mykey"
+  pool   = lxd_storage_bucket.bucket.pool
+  bucket = lxd_storage_bucket.bucket.name
+}
+```
+
+## Argument Reference
+
+* `name` - **Required** - Name of the storage bucket key.
+
+* `pool` - **Required** - Name of the storage pool hosting the storage bucket key.
+
+* `bucket` - **Required** - Name of the storage bucket.
+
+* `description` - *Optional* - Description of the storage bucket key.
+
+* `role` - *Optional* - Name of the role that controls the access rights for the key.
+  If not specified, the default role is used, as described in the [official documentation](https://documentation.ubuntu.com/lxd/en/latest/howto/storage_buckets/#manage-storage-bucket-keys).
+
+* `project` - *Optional* - Name of the project where the storage bucket key will be stored.
+
+* `remote` - *Optional* - The remote in which the resource will be created. If not provided,
+  the provider's default remote will be used.
+
+## Attribute Reference
+
+The following attributes are exported:
+
+* `access_key` - Access key of the storage bucket key.
+
+* `secret_key` - Secret key of the storage bucket key.
+
+## Importing
+
+Import ID syntax: `[<remote>:][<project>]/<pool>/<bucket>/<key>`
+
+* `<remote>` - *Optional* - Remote name.
+* `<project>` - *Optional* - Project name.
+* `<pool>` - **Required** - Storage pool name.
+* `<bucket>` - **Required** - Storage bucket name.
+* `<key>` - **Required** - Storage bucket key name.
+
+### Import example
+
+Example using the terraform import command:
+
+```shell
+$ terraform import lxd_storage_bucket_key.key proj/mypool/mybucket/mykey
+```
+
+Example using the import block (only available in Terraform v1.5.0 and later):
+
+```hcl
+resource "lxd_storage_bucket_key" "key" {
+  name    = "mykey"
+  project = "proj"
+  pool    = "mypool"
+  bucket  = "mybucket"
+}
+
+import {
+  to = lxd_storage_bucket_key.key
+  id = "proj/mypool/mybucket/mykey"
+}
+```
+
+## Notes
+
+* By default, LXD creates each storage bucket with an admin access key and a secret key.
+  Those keys can be imported if needed.
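Because `access_key` and `secret_key` are exported as sensitive attributes, downstream configuration can consume them without echoing them in plan output. A minimal sketch using illustrative output names:

```hcl
# Expose the generated credentials to a parent module or `terraform output`.
# Both outputs must be marked sensitive, matching the attributes they read.
output "bucket_access_key" {
  value     = lxd_storage_bucket_key.key.access_key
  sensitive = true
}

output "bucket_secret_key" {
  value     = lxd_storage_bucket_key.key.secret_key
  sensitive = true
}
```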
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 2691a87a..fea16a95 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -260,6 +260,8 @@ func (p *LxdProvider) Resources(_ context.Context) []func() resource.Resource {
 		network.NewNetworkZoneRecordResource,
 		profile.NewProfileResource,
 		project.NewProjectResource,
+		storage.NewStorageBucketResource,
+		storage.NewStorageBucketKeyResource,
 		storage.NewStoragePoolResource,
 		storage.NewStorageVolumeResource,
 		storage.NewStorageVolumeCopyResource,
diff --git a/internal/storage/resource_storage_bucket.go b/internal/storage/resource_storage_bucket.go
new file mode 100644
index 00000000..c0b3aab8
--- /dev/null
+++ b/internal/storage/resource_storage_bucket.go
@@ -0,0 +1,331 @@
+package storage
+
+import (
+	"context"
+	"fmt"
+
+	lxd "github.com/canonical/lxd/client"
+	"github.com/canonical/lxd/shared/api"
+	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/mapdefault"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-framework/tfsdk"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/terraform-lxd/terraform-provider-lxd/internal/common"
+	"github.com/terraform-lxd/terraform-provider-lxd/internal/errors"
+	provider_config "github.com/terraform-lxd/terraform-provider-lxd/internal/provider-config"
+)
+
+type StorageBucketModel struct {
+	Name        types.String `tfsdk:"name"`
+	Description types.String `tfsdk:"description"`
+	Pool        types.String `tfsdk:"pool"`
+	Project     types.String `tfsdk:"project"`
+	Target      types.String `tfsdk:"target"`
+	Remote      types.String `tfsdk:"remote"`
+	Config      types.Map    `tfsdk:"config"`
+
+	// Computed.
+	Location types.String `tfsdk:"location"`
+}
+
+// StorageBucketResource represents an LXD storage bucket resource.
+type StorageBucketResource struct {
+	provider *provider_config.LxdProviderConfig
+}
+
+// NewStorageBucketResource returns a new storage bucket resource.
+func NewStorageBucketResource() resource.Resource { + return &StorageBucketResource{} +} + +func (r StorageBucketResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = fmt.Sprintf("%s_storage_bucket", req.ProviderTypeName) +} + +func (r StorageBucketResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "description": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString(""), + }, + + "pool": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "project": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + + "remote": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "target": schema.StringAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplaceIfConfigured(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + + "config": schema.MapAttribute{ + Optional: true, + Computed: true, + ElementType: types.StringType, + Default: mapdefault.StaticValue(types.MapValueMust(types.StringType, map[string]attr.Value{})), + }, + + // Computed. + + "location": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +func (r *StorageBucketResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + data := req.ProviderData + if data == nil { + return + } + + provider, ok := data.(*provider_config.LxdProviderConfig) + if !ok { + resp.Diagnostics.Append(errors.NewProviderDataTypeError(req.ProviderData)) + return + } + + r.provider = provider +} + +func (r StorageBucketResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan StorageBucketModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + remote := plan.Remote.ValueString() + project := plan.Project.ValueString() + target := plan.Target.ValueString() + server, err := r.provider.InstanceServer(remote, project, target) + if err != nil { + resp.Diagnostics.Append(errors.NewInstanceServerError(err)) + return + } + + config, diags := common.ToConfigMap(ctx, plan.Config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + poolName := plan.Pool.ValueString() + bucketName := plan.Name.ValueString() + + bucket := api.StorageBucketsPost{ + Name: bucketName, + StorageBucketPut: api.StorageBucketPut{ + Description: plan.Description.ValueString(), + Config: config, + }, + } + + _, err = server.CreateStoragePoolBucket(poolName, bucket) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to create storage bucket %q", bucketName), err.Error()) + return + } + + // Update Terraform state. + diags = r.SyncState(ctx, &resp.State, server, plan) + resp.Diagnostics.Append(diags...) 
+} + +func (r StorageBucketResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state StorageBucketModel + + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + remote := state.Remote.ValueString() + project := state.Project.ValueString() + target := state.Target.ValueString() + server, err := r.provider.InstanceServer(remote, project, target) + if err != nil { + resp.Diagnostics.Append(errors.NewInstanceServerError(err)) + return + } + + diags = r.SyncState(ctx, &resp.State, server, state) + resp.Diagnostics.Append(diags...) +} + +func (r StorageBucketResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var plan StorageBucketModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + remote := plan.Remote.ValueString() + project := plan.Project.ValueString() + target := plan.Target.ValueString() + server, err := r.provider.InstanceServer(remote, project, target) + if err != nil { + resp.Diagnostics.Append(errors.NewInstanceServerError(err)) + return + } + + poolName := plan.Pool.ValueString() + bucketName := plan.Name.ValueString() + _, etag, err := server.GetStoragePoolBucket(poolName, bucketName) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to retrieve storage bucket %q", bucketName), err.Error()) + return + } + + config, diags := common.ToConfigMap(ctx, plan.Config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + newBucket := api.StorageBucketPut{ + Config: config, + Description: plan.Description.ValueString(), + } + + err = server.UpdateStoragePoolBucket(poolName, bucketName, newBucket, etag) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to update storage bucket %q", bucketName), err.Error()) + return + } + + diags = r.SyncState(ctx, &resp.State, server, plan) + resp.Diagnostics.Append(diags...) +} + +func (r StorageBucketResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var state StorageBucketModel + + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + remote := state.Remote.ValueString() + project := state.Project.ValueString() + server, err := r.provider.InstanceServer(remote, project, "") + if err != nil { + resp.Diagnostics.Append(errors.NewInstanceServerError(err)) + return + } + + poolName := state.Pool.ValueString() + bucketName := state.Name.ValueString() + err = server.DeleteStoragePoolBucket(poolName, bucketName) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to delete storage bucket %q", bucketName), err.Error()) + return + } +} + +func (r StorageBucketResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + meta := common.ImportMetadata{ + ResourceName: "storage_bucket", + RequiredFields: []string{"pool", "name"}, + } + + fields, diags := meta.ParseImportID(req.ID) + if diags != nil { + resp.Diagnostics.Append(diags) + return + } + + for k, v := range fields { + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(k), v)...) + } +} + +// SyncState fetches the server's current state for a storage bucket and +// updates the provided model. It then applies this updated model as the +// new state in Terraform. 
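+// If the bucket is no longer found on the server, the resource is instead
+// removed from the state, so the next plan proposes recreating it.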
+func (r StorageBucketResource) SyncState(ctx context.Context, tfState *tfsdk.State, server lxd.InstanceServer, m StorageBucketModel) diag.Diagnostics {
+	var respDiags diag.Diagnostics
+
+	poolName := m.Pool.ValueString()
+	bucketName := m.Name.ValueString()
+	bucket, _, err := server.GetStoragePoolBucket(poolName, bucketName)
+	if err != nil {
+		if errors.IsNotFoundError(err) {
+			tfState.RemoveResource(ctx)
+			return nil
+		}
+
+		respDiags.AddError(fmt.Sprintf("Failed to retrieve storage bucket %q", bucketName), err.Error())
+		return respDiags
+	}
+
+	config, diags := common.ToConfigMapType(ctx, bucket.Config)
+	respDiags.Append(diags...)
+
+	m.Name = types.StringValue(bucket.Name)
+	m.Location = types.StringValue(bucket.Location)
+	m.Description = types.StringValue(bucket.Description)
+	m.Config = config
+
+	m.Target = types.StringValue("")
+	if server.IsClustered() || bucket.Location != "none" {
+		m.Target = types.StringValue(bucket.Location)
+	}
+
+	if respDiags.HasError() {
+		return respDiags
+	}
+
+	return tfState.Set(ctx, &m)
+}
diff --git a/internal/storage/resource_storage_bucket_key.go b/internal/storage/resource_storage_bucket_key.go
new file mode 100644
index 00000000..8511e5f4
--- /dev/null
+++ b/internal/storage/resource_storage_bucket_key.go
@@ -0,0 +1,344 @@
+package storage
+
+import (
+	"context"
+	"fmt"
+
+	lxd "github.com/canonical/lxd/client"
+	"github.com/canonical/lxd/shared/api"
+	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-framework/tfsdk"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/terraform-lxd/terraform-provider-lxd/internal/common"
+	"github.com/terraform-lxd/terraform-provider-lxd/internal/errors"
+	provider_config "github.com/terraform-lxd/terraform-provider-lxd/internal/provider-config"
+)
+
+type StorageBucketKeyModel struct {
+	Name        types.String `tfsdk:"name"`
+	Description types.String `tfsdk:"description"`
+	Pool        types.String `tfsdk:"pool"`
+	Bucket      types.String `tfsdk:"bucket"`
+	Role        types.String `tfsdk:"role"`
+	Project     types.String `tfsdk:"project"`
+	Remote      types.String `tfsdk:"remote"`
+
+	// Computed.
+	AccessKey types.String `tfsdk:"access_key"`
+	SecretKey types.String `tfsdk:"secret_key"`
+}
+
+// StorageBucketKeyResource represents an LXD storage bucket key resource.
+type StorageBucketKeyResource struct {
+	provider *provider_config.LxdProviderConfig
+}
+
+// NewStorageBucketKeyResource returns a new storage bucket key resource.
+func NewStorageBucketKeyResource() resource.Resource {
+	return &StorageBucketKeyResource{}
+}
+
+func (r StorageBucketKeyResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+	resp.TypeName = fmt.Sprintf("%s_storage_bucket_key", req.ProviderTypeName)
+}
+
+// TODO: Set up a proper schema for the storage bucket key, like volume does for pool.
+func (r StorageBucketKeyResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "description": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString(""), + }, + + "pool": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "bucket": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "role": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString("read-only"), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.OneOf("admin", "read-only"), + }, + }, + + "project": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + + "remote": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + // Computed. + + "access_key": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + + "secret_key": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + }, + } +} + +func (r *StorageBucketKeyResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + data := req.ProviderData + if data == nil { + return + } + + provider, ok := data.(*provider_config.LxdProviderConfig) + if !ok { + resp.Diagnostics.Append(errors.NewProviderDataTypeError(req.ProviderData)) + return + } + + r.provider = provider +} + +func (r StorageBucketKeyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan StorageBucketKeyModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + remote := plan.Remote.ValueString() + project := plan.Project.ValueString() + server, err := r.provider.InstanceServer(remote, project, "") + if err != nil { + resp.Diagnostics.Append(errors.NewInstanceServerError(err)) + return + } + + poolName := plan.Pool.ValueString() + bucketName := plan.Bucket.ValueString() + + // Ensure storage bucket exists. + _, _, err = server.GetStoragePoolBucket(poolName, bucketName) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to retrieve storage bucket %q", bucketName), err.Error()) + return + } + + keyName := plan.Name.ValueString() + + key := api.StorageBucketKeysPost{ + StorageBucketKeyPut: api.StorageBucketKeyPut{ + Description: plan.Description.ValueString(), + Role: plan.Role.ValueString(), + }, + Name: keyName, + } + + _, err = server.CreateStoragePoolBucketKey(poolName, bucketName, key) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to create storage bucket key %q of %q", keyName, bucketName), err.Error()) + return + } + + // Update Terraform state. + diags = r.SyncState(ctx, &resp.State, server, plan) + resp.Diagnostics.Append(diags...) 
+}
+
+func (r StorageBucketKeyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+	var state StorageBucketKeyModel
+
+	diags := req.State.Get(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	remote := state.Remote.ValueString()
+	project := state.Project.ValueString()
+	server, err := r.provider.InstanceServer(remote, project, "")
+	if err != nil {
+		resp.Diagnostics.Append(errors.NewInstanceServerError(err))
+		return
+	}
+
+	diags = r.SyncState(ctx, &resp.State, server, state)
+	resp.Diagnostics.Append(diags...)
+}
+
+func (r StorageBucketKeyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+	var plan StorageBucketKeyModel
+
+	diags := req.Plan.Get(ctx, &plan)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	remote := plan.Remote.ValueString()
+	project := plan.Project.ValueString()
+	server, err := r.provider.InstanceServer(remote, project, "")
+	if err != nil {
+		resp.Diagnostics.Append(errors.NewInstanceServerError(err))
+		return
+	}
+
+	poolName := plan.Pool.ValueString()
+	bucketName := plan.Bucket.ValueString()
+
+	// Ensure storage bucket exists.
+	_, _, err = server.GetStoragePoolBucket(poolName, bucketName)
+	if err != nil {
+		resp.Diagnostics.AddError(fmt.Sprintf("Failed to retrieve storage bucket %q", bucketName), err.Error())
+		return
+	}
+
+	keyName := plan.Name.ValueString()
+	key, etag, err := server.GetStoragePoolBucketKey(poolName, bucketName, keyName)
+	if err != nil {
+		resp.Diagnostics.AddError(fmt.Sprintf("Failed to retrieve storage bucket key %q of bucket %q", keyName, bucketName), err.Error())
+		return
+	}
+
+	newKey := api.StorageBucketKeyPut{
+		Description: plan.Description.ValueString(),
+		Role:        plan.Role.ValueString(),
+		// As we do not want to update the access key and the secret key, we provide the existing values for the update.
+		AccessKey: key.AccessKey,
+		SecretKey: key.SecretKey,
+	}
+
+	err = server.UpdateStoragePoolBucketKey(poolName, bucketName, keyName, newKey, etag)
+	if err != nil {
+		resp.Diagnostics.AddError(fmt.Sprintf("Failed to update storage bucket key %q of bucket %q", keyName, bucketName), err.Error())
+		return
+	}
+
+	diags = r.SyncState(ctx, &resp.State, server, plan)
+	resp.Diagnostics.Append(diags...)
+}
+
+func (r StorageBucketKeyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	var state StorageBucketKeyModel
+
+	diags := req.State.Get(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	remote := state.Remote.ValueString()
+	project := state.Project.ValueString()
+	server, err := r.provider.InstanceServer(remote, project, "")
+	if err != nil {
+		resp.Diagnostics.Append(errors.NewInstanceServerError(err))
+		return
+	}
+
+	poolName := state.Pool.ValueString()
+	bucketName := state.Bucket.ValueString()
+
+	// Ensure storage bucket exists.
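+	// Looking the bucket up first yields a clearer error when it is already
+	// gone than a failed key deletion would.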
+ _, _, err = server.GetStoragePoolBucket(poolName, bucketName) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to retrieve storage bucket %q", bucketName), err.Error()) + return + } + + keyName := state.Name.ValueString() + err = server.DeleteStoragePoolBucketKey(poolName, bucketName, keyName) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to delete storage bucket key %q of bucket %q", keyName, bucketName), err.Error()) + return + } +} + +func (r StorageBucketKeyResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + meta := common.ImportMetadata{ + ResourceName: "storage_bucket_key", + RequiredFields: []string{"pool", "bucket", "name"}, + } + + fields, diags := meta.ParseImportID(req.ID) + if diags != nil { + resp.Diagnostics.Append(diags) + return + } + + for k, v := range fields { + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(k), v)...) + } +} + +// SyncState fetches the server's current state for a storage bucket key and +// updates the provided model. It then applies this updated model as the +// new state in Terraform. +func (r StorageBucketKeyResource) SyncState(ctx context.Context, tfState *tfsdk.State, server lxd.InstanceServer, m StorageBucketKeyModel) diag.Diagnostics { + var respDiags diag.Diagnostics + + poolName := m.Pool.ValueString() + bucketName := m.Bucket.ValueString() + keyName := m.Name.ValueString() + + key, _, err := server.GetStoragePoolBucketKey(poolName, bucketName, keyName) + if err != nil { + if errors.IsNotFoundError(err) { + tfState.RemoveResource(ctx) + return nil + } + + respDiags.AddError(fmt.Sprintf("Failed to retrieve storage bucket key %q of bucket %q", keyName, bucketName), err.Error()) + return respDiags + } + + m.Name = types.StringValue(key.Name) + m.Description = types.StringValue(key.Description) + m.Role = types.StringValue(key.Role) + m.AccessKey = types.StringValue(key.AccessKey) + m.SecretKey = types.StringValue(key.SecretKey) + + return tfState.Set(ctx, &m) +} diff --git a/internal/storage/resource_storage_bucket_key_test.go b/internal/storage/resource_storage_bucket_key_test.go new file mode 100644 index 00000000..ecffd10b --- /dev/null +++ b/internal/storage/resource_storage_bucket_key_test.go @@ -0,0 +1,236 @@ +package storage_test + +import ( + "fmt" + "testing" + + petname "github.com/dustinkirkland/golang-petname" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/terraform-lxd/terraform-provider-lxd/internal/acctest" +) + +func TestAccStorageBucketKey_basic(t *testing.T) { + poolName := petname.Generate(2, "-") + bucketName := petname.Generate(2, "-") + keyName := petname.Generate(2, "-") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucketKey_basic(poolName, bucketName, keyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("lxd_storage_pool.pool1", "name", poolName), + resource.TestCheckResourceAttr("lxd_storage_pool.pool1", "driver", "dir"), + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "name", bucketName), + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "pool", poolName), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "name", keyName), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", 
"bucket", bucketName), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "pool", poolName), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "role", "read-only"), + resource.TestCheckResourceAttrSet("lxd_storage_bucket_key.key1", "access_key"), + resource.TestCheckResourceAttrSet("lxd_storage_bucket_key.key1", "secret_key"), + ), + }, + }, + }) +} + +func TestAccStorageBucketKey_role(t *testing.T) { + bucketName := petname.Generate(2, "-") + keyName := petname.Generate(2, "-") + role := "admin" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucketKey_role(bucketName, keyName, role), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "name", bucketName), + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "pool", "default"), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "name", keyName), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "bucket", bucketName), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "pool", "default"), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "role", role), + resource.TestCheckResourceAttrSet("lxd_storage_bucket_key.key1", "access_key"), + resource.TestCheckResourceAttrSet("lxd_storage_bucket_key.key1", "secret_key"), + ), + }, + }, + }) +} + +func TestAccStorageBucketKey_project(t *testing.T) { + projectName := petname.Name() + bucketName := petname.Generate(2, "-") + keyName := petname.Generate(2, "-") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucketKey_project(projectName, bucketName, keyName), + Check: resource.ComposeTestCheckFunc( + + resource.TestCheckResourceAttr("lxd_project.project1", "name", projectName), + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "pool", "default"), + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "project", projectName), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "name", keyName), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "bucket", bucketName), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "pool", "default"), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "project", projectName), + resource.TestCheckResourceAttr("lxd_storage_bucket_key.key1", "role", "read-only"), + resource.TestCheckResourceAttrSet("lxd_storage_bucket_key.key1", "access_key"), + resource.TestCheckResourceAttrSet("lxd_storage_bucket_key.key1", "secret_key"), + ), + }, + }, + }) +} + +func TestAccStorageBucketKey_importBasic(t *testing.T) { + poolName := petname.Generate(2, "-") + bucketName := petname.Generate(2, "-") + keyName := petname.Generate(2, "-") + resourceName := "lxd_storage_bucket_key.key1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucketKey_basic(poolName, bucketName, keyName), + }, + { + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("/%s/%s/%s", poolName, bucketName, keyName), + ImportStateVerifyIdentifierAttribute: "name", + ImportState: true, + ImportStateVerify: true, + ImportStateCheck: 
func(states []*terraform.InstanceState) error {
+					if len(states) != 1 {
+						return fmt.Errorf("expected 1 state, got %d", len(states))
+					}
+					state := states[0]
+					if state.Attributes["access_key"] == "" {
+						return fmt.Errorf("expected access_key to be set")
+					}
+
+					if state.Attributes["secret_key"] == "" {
+						return fmt.Errorf("expected secret_key to be set")
+					}
+
+					return nil
+				},
+			},
+		},
+	})
+}
+
+func TestAccStorageBucketKey_importProject(t *testing.T) {
+	projectName := petname.Generate(2, "-")
+	bucketName := petname.Generate(2, "-")
+	keyName := petname.Generate(2, "-")
+	resourceName := "lxd_storage_bucket_key.key1"
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(t) },
+		ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccStorageBucketKey_project(projectName, bucketName, keyName),
+			},
+			{
+				ResourceName:                         resourceName,
+				ImportStateId:                        fmt.Sprintf("%s/default/%s/%s", projectName, bucketName, keyName),
+				ImportStateVerifyIdentifierAttribute: "name",
+				ImportStateVerify:                    true,
+				ImportState:                          true,
+				ImportStateCheck: func(states []*terraform.InstanceState) error {
+					if len(states) != 1 {
+						return fmt.Errorf("expected 1 state, got %d", len(states))
+					}
+					state := states[0]
+					if state.Attributes["access_key"] == "" {
+						return fmt.Errorf("expected access_key to be set")
+					}
+
+					if state.Attributes["secret_key"] == "" {
+						return fmt.Errorf("expected secret_key to be set")
+					}
+
+					return nil
+				},
+			},
+		},
+	})
+}
+
+func testAccStorageBucketKey_basic(poolName string, bucketName string, keyName string) string {
+	return fmt.Sprintf(`
+resource "lxd_storage_pool" "pool1" {
+  name   = "%s"
+  driver = "dir"
+}
+
+resource "lxd_storage_bucket" "bucket1" {
+  name = "%s"
+  pool = lxd_storage_pool.pool1.name
+}
+
+resource "lxd_storage_bucket_key" "key1" {
+  name   = "%s"
+  pool   = lxd_storage_bucket.bucket1.pool
+  bucket = lxd_storage_bucket.bucket1.name
+}
+	`, poolName, bucketName, keyName)
+}
+
+func testAccStorageBucketKey_role(bucketName string, keyName string, role string) string {
+	return fmt.Sprintf(`
+resource "lxd_storage_bucket" "bucket1" {
+  name = "%s"
+  pool = "default"
+}
+
+resource "lxd_storage_bucket_key" "key1" {
+  name   = "%s"
+  pool   = lxd_storage_bucket.bucket1.pool
+  bucket = lxd_storage_bucket.bucket1.name
+  role   = "%s"
+}
+	`, bucketName, keyName, role)
+}
+
+func testAccStorageBucketKey_project(projectName string, bucketName string, keyName string) string {
+	return fmt.Sprintf(`
+resource "lxd_project" "project1" {
+  name = "%s"
+  config = {
+	"features.storage.volumes" = false
+  }
+}
+
+resource "lxd_storage_bucket" "bucket1" {
+  name    = "%s"
+  pool    = "default"
+  project = lxd_project.project1.name
+}
+
+resource "lxd_storage_bucket_key" "key1" {
+  name    = "%s"
+  project = lxd_storage_bucket.bucket1.project
+  pool    = lxd_storage_bucket.bucket1.pool
+  bucket  = lxd_storage_bucket.bucket1.name
+}
+	`, projectName, bucketName, keyName)
+}
diff --git a/internal/storage/resource_storage_bucket_test.go b/internal/storage/resource_storage_bucket_test.go
new file mode 100644
index 00000000..7bb5ab2e
--- /dev/null
+++ b/internal/storage/resource_storage_bucket_test.go
@@ -0,0 +1,162 @@
+package storage_test
+
+import (
+	"fmt"
+	"testing"
+
+	petname "github.com/dustinkirkland/golang-petname"
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+	"github.com/terraform-lxd/terraform-provider-lxd/internal/acctest"
+)
+
+func TestAccStorageBucket_basic(t *testing.T) {
+	poolName := 
petname.Generate(2, "-") + bucketName := petname.Generate(2, "-") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic(poolName, bucketName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("lxd_storage_pool.pool1", "name", poolName), + resource.TestCheckResourceAttr("lxd_storage_pool.pool1", "driver", "dir"), + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "name", bucketName), + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "pool", poolName), + ), + }, + }, + }) +} + +func TestAccStorageBucket_target(t *testing.T) { + bucketName := petname.Generate(2, "-") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckClustering(t) + }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_target(bucketName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "name", bucketName), + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "pool", "default"), + ), + }, + }, + }) +} + +func TestAccStorageBucket_project(t *testing.T) { + projectName := petname.Name() + bucketName := petname.Generate(2, "-") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_project(projectName, bucketName), + Check: resource.ComposeTestCheckFunc( + + resource.TestCheckResourceAttr("lxd_project.project1", "name", projectName), + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "pool", "default"), + resource.TestCheckResourceAttr("lxd_storage_bucket.bucket1", "project", projectName), + ), + }, + }, + }) +} + +func TestAccStorageBucket_importBasic(t *testing.T) { + poolName := petname.Generate(2, "-") + bucketName := petname.Generate(2, "-") + resourceName := "lxd_storage_bucket.bucket1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic(poolName, bucketName), + }, + { + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("/%s/%s", poolName, bucketName), + ImportStateVerifyIdentifierAttribute: "name", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccStorageBucket_importProject(t *testing.T) { + projectName := petname.Generate(2, "-") + bucketName := petname.Generate(2, "-") + resourceName := "lxd_storage_bucket.bucket1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_project(projectName, bucketName), + }, + { + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("%s/default/%s", projectName, bucketName), + ImportStateVerifyIdentifierAttribute: "name", + ImportStateVerify: true, + ImportState: true, + }, + }, + }) +} + +func testAccStorageBucket_basic(poolName string, bucketName string) string { + return fmt.Sprintf(` +resource "lxd_storage_pool" "pool1" { + name = "%s" + driver = "dir" +} + +resource "lxd_storage_bucket" "bucket1" { + name = "%s" + pool = lxd_storage_pool.pool1.name +} + `, 
poolName, bucketName) +} + +func testAccStorageBucket_target(bucketName string) string { + return fmt.Sprintf(` +resource "lxd_storage_bucket" "bucket1" { + name = "%s" + pool = "default" + target = "node-2" +} + `, bucketName) +} + +func testAccStorageBucket_project(projectName string, bucketName string) string { + return fmt.Sprintf(` +resource "lxd_project" "project1" { + name = "%s" + config = { + "features.storage.volumes" = false + } +} + +resource "lxd_storage_bucket" "bucket1" { + name = "%s" + pool = "default" + project = lxd_project.project1.name +} + `, projectName, bucketName) +} From d2d4c9d0fa6eb0f52ab3b3a1d0d7b85ef322ccec Mon Sep 17 00:00:00 2001 From: Fabian Mettler Date: Mon, 18 Mar 2024 22:45:07 +0100 Subject: [PATCH 2/4] storage: Refactor SyncState function to merge user-defined config with current config state for Storage Bucket Signed-off-by: Fabian Mettler (cherry picked from commit 9bafd5ea6651099591dfdc9fe62a70e956d7bb65) Signed-off-by: Din Music --- internal/storage/resource_storage_bucket.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/internal/storage/resource_storage_bucket.go b/internal/storage/resource_storage_bucket.go index c0b3aab8..40360834 100644 --- a/internal/storage/resource_storage_bucket.go +++ b/internal/storage/resource_storage_bucket.go @@ -310,7 +310,11 @@ func (r StorageBucketResource) SyncState(ctx context.Context, tfState *tfsdk.Sta return respDiags } - config, diags := common.ToConfigMapType(ctx, bucket.Config) + // Extract user defined config and merge it with current config state. + stateConfig := common.StripConfig(bucket.Config, m.Config, m.ComputedKeys()) + + // Convert config state into schema type. + config, diags := common.ToConfigMapType(ctx, stateConfig, m.Config) respDiags.Append(diags...) m.Name = types.StringValue(bucket.Name) @@ -329,3 +333,12 @@ func (r StorageBucketResource) SyncState(ctx context.Context, tfState *tfsdk.Sta return tfState.Set(ctx, &m) } + +// ComputedKeys returns list of computed config keys. 
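+// These keys are populated by LXD itself (for example, storage driver
+// defaults) rather than by the user, so SyncState strips them before
+// comparing the server's config with the user-defined config.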
+func (_ StorageBucketModel) ComputedKeys() []string { + return []string{ + "block.filesystem", + "block.mount_options", + "volatile.", + } +} From 8eda5b0e5cbd79db2e3516b9e8ba2c384edb9fbf Mon Sep 17 00:00:00 2001 From: Din Music Date: Fri, 20 Sep 2024 12:52:04 +0000 Subject: [PATCH 3/4] storage: Add storage_buckets API extension precheck Signed-off-by: Din Music --- .../storage/resource_storage_bucket_key_test.go | 17 ++++++++++++++--- .../storage/resource_storage_bucket_test.go | 17 ++++++++++++++--- 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/internal/storage/resource_storage_bucket_key_test.go b/internal/storage/resource_storage_bucket_key_test.go index ecffd10b..1ea4d7e0 100644 --- a/internal/storage/resource_storage_bucket_key_test.go +++ b/internal/storage/resource_storage_bucket_key_test.go @@ -16,7 +16,10 @@ func TestAccStorageBucketKey_basic(t *testing.T) { keyName := petname.Generate(2, "-") resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckAPIExtensions(t, "storage_buckets") + }, ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, Steps: []resource.TestStep{ { @@ -46,6 +49,7 @@ func TestAccStorageBucketKey_role(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) + acctest.PreCheckAPIExtensions(t, "storage_buckets") }, ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, Steps: []resource.TestStep{ @@ -74,6 +78,7 @@ func TestAccStorageBucketKey_project(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) + acctest.PreCheckAPIExtensions(t, "storage_buckets") }, ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, Steps: []resource.TestStep{ @@ -104,7 +109,10 @@ func TestAccStorageBucketKey_importBasic(t *testing.T) { resourceName := "lxd_storage_bucket_key.key1" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckAPIExtensions(t, "storage_buckets") + }, ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, Steps: []resource.TestStep{ { @@ -143,7 +151,10 @@ func TestAccStorageBucketKey_importProject(t *testing.T) { resourceName := "lxd_storage_bucket_key.key1" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckAPIExtensions(t, "storage_buckets") + }, ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, Steps: []resource.TestStep{ { diff --git a/internal/storage/resource_storage_bucket_test.go b/internal/storage/resource_storage_bucket_test.go index 7bb5ab2e..b57c06f3 100644 --- a/internal/storage/resource_storage_bucket_test.go +++ b/internal/storage/resource_storage_bucket_test.go @@ -14,7 +14,10 @@ func TestAccStorageBucket_basic(t *testing.T) { bucketName := petname.Generate(2, "-") resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckAPIExtensions(t, "storage_buckets") + }, ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, Steps: []resource.TestStep{ { @@ -37,6 +40,7 @@ func TestAccStorageBucket_target(t *testing.T) { PreCheck: func() { acctest.PreCheck(t) acctest.PreCheckClustering(t) + acctest.PreCheckAPIExtensions(t, "storage_buckets") }, ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, Steps: []resource.TestStep{ @@ -58,6 +62,7 @@ func TestAccStorageBucket_project(t 
*testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) + acctest.PreCheckAPIExtensions(t, "storage_buckets") }, ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, Steps: []resource.TestStep{ @@ -80,7 +85,10 @@ func TestAccStorageBucket_importBasic(t *testing.T) { resourceName := "lxd_storage_bucket.bucket1" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckAPIExtensions(t, "storage_buckets") + }, ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, Steps: []resource.TestStep{ { @@ -103,7 +111,10 @@ func TestAccStorageBucket_importProject(t *testing.T) { resourceName := "lxd_storage_bucket.bucket1" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckAPIExtensions(t, "storage_buckets") + }, ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, Steps: []resource.TestStep{ { From 7ab7427469f35f208f1325fcdc0ef9e2ab89776e Mon Sep 17 00:00:00 2001 From: Din Music Date: Fri, 20 Sep 2024 13:34:50 +0000 Subject: [PATCH 4/4] .github: Configure MinIO with LXD Signed-off-by: Din Music --- .github/workflows/test.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7e8b8a47..c15ea4b3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -63,6 +63,23 @@ jobs: # Add HTTPS remote. lxc remote add localhost "$(lxc config trust add --name lxd-terraform-provider --quiet)" + - name: Configure MinIO + run: | + arch=$(dpkg --print-architecture) + mkdir -p /opt/minio + + # Download the minio server. + curl -sSfL "https://dl.min.io/server/minio/release/linux-${arch}/minio" --output "/opt/minio/minio" + chmod +x "/opt/minio/minio" + + # Download the minio client. + curl -sSfL "https://dl.min.io/client/mc/release/linux-${arch}/mc" --output "/opt/minio/mc" + chmod +x "/opt/minio/mc" + + # Set the snap config key for minio and reload LXD to have it take effect. + sudo snap set lxd minio.path=/opt/minio + sudo systemctl reload snap.lxd.daemon + - name: Configure OVN run: | sudo apt-get update